blob: 858bb473bd42c45cce55c3df9f29d379e329c1f7 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
/* Number of SR-IOV virtual functions to create at probe time.
 * Read-only via sysfs (S_IRUGO); 0 disables VF creation.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the hardware; read-only. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI vendor/device IDs claimed by this driver; zero entry terminates. */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name for each bit position, used when
 * reporting unrecoverable hardware errors.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: names for the upper 32 unrecoverable-error bits. */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
/* Free the DMA-coherent ring memory backing @q, if any was allocated.
 * mem->va is NULLed so a repeat call is a safe no-op.
 */
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}
138
/* Initialize @q (len entries of entry_size bytes each) and allocate its
 * zeroed DMA-coherent backing memory.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
154
/* Enable/disable host interrupts via the HOSTINTR bit in the PCI-config
 * membar control register.  Read-modify-write; returns early without a
 * config write when the bit is already in the requested state.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
/* Enable/disable adapter interrupts.  Prefers the FW command; falls back
 * to the PCI-config register path if the command fails.  No-op on Lancer
 * (not controllable via this register) or after an EEH error.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
189
/* Ring the RX-queue doorbell: tell HW that @posted new buffers are
 * available on ring @qid.  wmb() orders the descriptor writes before the
 * doorbell so HW never sees stale entries.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
199
/* Ring the TX-queue doorbell for @txo: @posted WRBs were just written.
 * Uses the per-txo doorbell offset; wmb() orders WRB writes first.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
210
/* Notify the event queue doorbell: acknowledge @num_popped event entries,
 * optionally re-arm the EQ (@arm) and/or clear the interrupt (@clear_int).
 * Skipped entirely after an EEH error since MMIO is unsafe then.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
230
/* Notify the completion queue doorbell: acknowledge @num_popped CQ entries
 * and optionally re-arm it (@arm).  No-op after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
246
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530250 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700251 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530252 int status;
253 u8 mac[ETH_ALEN];
254 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700255
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
Sathya Perla5a712c12013-07-23 15:24:59 +0530259 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
260 * privilege or if PF did not provision the new MAC address.
261 * On BE3, this cmd will always fail if the VF doesn't have the
262 * FILTMGMT privilege. This failure is OK, only if the PF programmed
263 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000264 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530265 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
266 adapter->if_handle, &adapter->pmac_id[0], 0);
267 if (!status) {
268 curr_pmac_id = adapter->pmac_id[0];
269
270 /* Delete the old programmed MAC. This call may fail if the
271 * old MAC was already deleted by the PF driver.
272 */
273 if (adapter->pmac_id[0] != old_pmac_id)
274 be_cmd_pmac_del(adapter, adapter->if_handle,
275 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000276 }
277
Sathya Perla5a712c12013-07-23 15:24:59 +0530278 /* Decide if the new MAC is successfully activated only after
279 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000280 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530281 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000282 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000283 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* The MAC change did not happen, either due to lack of privilege
286 * or PF didn't pre-provision.
287 */
288 if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
289 status = -EPERM;
290 goto err;
291 }
292
Somnath Koture3a7ae22011-10-27 07:14:05 +0000293 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530294 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000295 return 0;
296err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700298 return status;
299}
300
Sathya Perlaca34fe32012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0-format (BE2) HW stats into adapter->drv_stats after
 * converting from LE to CPU byte order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filter drops separately; sum them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are kept per physical port in the rxf stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy the v1-format HW stats (non-BE2 chips) into adapter->drv_stats
 * after converting from LE to CPU byte order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already combines address/vlan filter drops */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
423
/* Copy Lancer pport-format HW stats into adapter->drv_stats after LE to
 * CPU conversion.  Lancer keeps 64-bit counters; only the _lo words are
 * folded into the 32-bit driver stats fields.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan filter drops are reported separately; sum them */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
/* Parse the raw FW stats response into adapter->drv_stats and per-queue
 * RX stats, choosing the parser that matches the chip's stats layout.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
512
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet/byte counts
 * (read consistently via the u64_stats seqcount retry loops) and derive
 * the netdev error counters from the FW-provided driver stats.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
/* Account one transmitted skb in @txo's stats: one request, @wrb_cnt WRBs,
 * @copied bytes, and gso_segs packets (1 when not GSO).  @stopped notes
 * that the TX queue had to be stopped.  Writers are serialized per-queue;
 * the u64_stats_update pair lets 32-bit readers get consistent snapshots.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
608
/* Determine number of WRB entries needed to xmit data in an skb:
 * one per fragment (plus one for linear data, if any), plus the header
 * WRB.  Non-Lancer chips require an even WRB count, so *dummy is set
 * true when a padding WRB must be added.
 */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
/* Return the VLAN tag to place in the TX WRB for @skb.  If the priority
 * bits the stack chose are not in the adapter's available-priority bitmap,
 * substitute the FW-recommended priority while keeping the VLAN id.
 */
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}
653
/* Fill the header WRB that precedes the fragment WRBs of a TX request.
 * Sets LSO/checksum-offload flags from the skb, inlines the VLAN tag when
 * one is present, and records the total WRB count and payload length.
 * @skip_hw_vlan: when true, completion bit is cleared (evt=1, compl=0) to
 *                tell the F/W not to insert a VLAN tag in hardware.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag is not set for Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
688
/* Undo the DMA mapping recorded in a (little-endian) WRB.
 * The WRB is converted to CPU byte order in place so the address/length
 * fields can be read back. Zero-length WRBs (the dummy pad entries) are
 * skipped.
 * @unmap_single: true when the fragment was mapped with dma_map_single()
 *                (the skb linear part); false for page fragments.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	/* Reassemble the 64-bit bus address from the two 32-bit halves */
	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
				DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
/* DMA-map an skb and build its WRB chain on the TX queue: a header WRB,
 * one WRB for the linear part (if any), one per paged fragment, and an
 * optional dummy WRB to pad the count.
 * Returns the number of payload bytes queued, or 0 on DMA-mapping failure
 * (in which case the queue head is rewound and all mappings are undone).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB now; it is filled in last, once the
	 * total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	if (skb->len > skb->data_len) {
		/* Map the linear (non-paged) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
			skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length pad entry to keep the WRB count even (see
	 * wrb_cnt_for_skb())
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the queue head and unmap everything mapped so far.
	 * Only the first entry can be a dma_map_single() mapping, hence
	 * map_single is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet payload in software, instead of relying on HW tagging.
 * May replace the skb (skb_share_check/__vlan_put_tag); returns NULL when
 * a copy/insert fails, in which case the caller must drop the packet.
 * Sets *skip_hw_vlan (when non-NULL) to tell the F/W to skip its own
 * VLAN insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* In QnQ mode (async event received), fall back to the port VLAN
	 * when the stack supplied no tag.
	 */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now inline in the payload; clear the out-of-band
		 * tag so HW does not also insert it.
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
815
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000816static bool be_ipv6_exthdr_check(struct sk_buff *skb)
817{
818 struct ethhdr *eh = (struct ethhdr *)skb->data;
819 u16 offset = ETH_HLEN;
820
821 if (eh->h_proto == htons(ETH_P_IPV6)) {
822 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
823
824 offset += sizeof(struct ipv6hdr);
825 if (ip6h->nexthdr != NEXTHDR_TCP &&
826 ip6h->nexthdr != NEXTHDR_UDP) {
827 struct ipv6_opt_hdr *ehdr =
828 (struct ipv6_opt_hdr *) (skb->data + offset);
829
830 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
831 if (ehdr->hdrlen == 0xff)
832 return true;
833 }
834 }
835 return false;
836}
837
838static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
839{
840 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
841}
842
Sathya Perlaee9c7992013-05-22 23:04:55 +0000843static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
844 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000845{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000846 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000847}
848
/* Apply the chip-errata workarounds needed before a packet can be handed
 * to the hardware. May replace or consume the skb; returns NULL when the
 * packet was dropped (already freed), otherwise the (possibly new) skb.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the frame back to the IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
925
/* ndo_start_xmit handler: apply workarounds, build the WRB chain, and
 * ring the TX doorbell. Always returns NETDEV_TX_OK; on any failure the
 * skb is freed (dropped) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rewind point if WRB creation fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;	/* dropped inside the workarounds */

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
971
972static int be_change_mtu(struct net_device *netdev, int new_mtu)
973{
974 struct be_adapter *adapter = netdev_priv(netdev);
975 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000976 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
977 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700978 dev_info(&adapter->pdev->dev,
979 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000980 BE_MIN_MTU,
981 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700982 return -EINVAL;
983 }
984 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
985 netdev->mtu, new_mtu);
986 netdev->mtu = new_mtu;
987 return 0;
988}
989
990/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000991 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
992 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700993 */
/* Program the HW VLAN filter table from adapter->vlan_tag[].
 * Falls back to VLAN promiscuous mode when more VLANs are configured than
 * the HW supports, or when programming the filter fails.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL table + promisc flag: accept all VLANs */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
1029
/* ndo_vlan_rx_add_vid handler: mark @vid in the driver's VLAN table and
 * reprogram the HW filter. On failure the table entry is rolled back.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* On BE3, only the physical function may configure VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
1055
/* ndo_vlan_rx_kill_vid handler: clear @vid from the driver's VLAN table
 * and reprogram the HW filter. On failure the table entry is restored.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* On BE3, only the physical function may configure VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* restore on failure */
ret:
	return status;
}
1081
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * RX filtering to match the netdev flags and address lists, falling back
 * to (multicast) promiscuous mode when HW filter slots run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-apply the VLAN filter table that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: drop all secondary MACs and re-add them */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More unicast addresses than PMAC slots: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1143
/* ndo_set_vf_mac handler: replace the MAC address of VF @vf.
 * Lancer uses the MAC-list commands; BE3 deletes the old PMAC entry and
 * adds the new one. The cached vf_cfg->mac_addr is updated only on
 * success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove the currently-active MAC (if any) before setting
		 * the new one via the MAC list.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of this pmac_del is overwritten
		 * by the pmac_add below, so a delete failure is silently
		 * ignored — confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1183
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001184static int be_get_vf_config(struct net_device *netdev, int vf,
1185 struct ifla_vf_info *vi)
1186{
1187 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001188 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001189
Sathya Perla11ac75e2011-12-13 00:58:50 +00001190 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001191 return -EPERM;
1192
Sathya Perla11ac75e2011-12-13 00:58:50 +00001193 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001194 return -EINVAL;
1195
1196 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001197 vi->tx_rate = vf_cfg->tx_rate;
1198 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001199 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001200 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001201
1202 return 0;
1203}
1204
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF @vf
 * via the hardware switch config. A @vlan of 0 resets tagging back to the
 * VF's default VID.
 * NOTE(review): the @qos argument is accepted but never used — confirm
 * QoS is intentionally unsupported here.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1239
/* ndo_set_vf_tx_rate handler: set the TX rate limit for VF @vf.
 * @rate is in Mbps and must be within 100..10000; the firmware command
 * takes the rate in units of 10 Mbps (hence rate / 10). The cached
 * vf_cfg tx_rate is updated only on success.
 */
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	/* Lancer and BE3 use different firmware commands for rate limiting */
	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}
1270
/* Adaptive interrupt coalescing: recompute and program the event-queue
 * delay (EQD) for @eqo based on the RX packet rate observed over the
 * last second. With AIC disabled, the statically configured eqd is
 * programmed instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Read rx_pkts consistently against concurrent writers */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps to a delay value, then clamp to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Only issue the firmware command when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1319
/* Account one RX completion into the per-queue stats, inside the
 * u64_stats sync bracket so 64-bit counters read consistently on 32-bit
 * hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1335
Sathya Perla2e588f82011-03-11 02:49:26 +00001336static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001337{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001338 /* L4 checksum is not reliable for non TCP/UDP packets.
1339 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001340 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1341 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001342}
1343
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001344static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1345 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001347 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001349 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001350
Sathya Perla3abcded2010-10-03 22:12:27 -07001351 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352 BUG_ON(!rx_page_info->page);
1353
Ajit Khaparde205859a2010-02-09 01:34:21 +00001354 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001355 dma_unmap_page(&adapter->pdev->dev,
1356 dma_unmap_addr(rx_page_info, bus),
1357 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001358 rx_page_info->last_page_user = false;
1359 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360
1361 atomic_dec(&rxq->used);
1362 return rx_page_info;
1363}
1364
1365/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366static void be_rx_compl_discard(struct be_rx_obj *rxo,
1367 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001368{
Sathya Perla3abcded2010-10-03 22:12:27 -07001369 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001370 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001371 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001372
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001373 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001374 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001375 put_page(page_info->page);
1376 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001377 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378 }
1379}
1380
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first rx_frag_size bytes hold the start of the frame: a tiny frame
 * (<= BE_HDR_LEN) is copied entirely into the skb's linear area; otherwise
 * only the Ethernet header is copied and the rest is attached as page
 * fragments. Consecutive HW fragments that came from the same physical
 * page are coalesced into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * payload stays in the page and is attached as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment frame: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: the slot already holds
			 * a reference, drop this fragment's extra one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1457
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the completion's fragments, applies
 * checksum/RSS/VLAN metadata, and hands the frame to the network stack.
 * On skb allocation failure the completion's buffers are discarded and
 * a drop is counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		/* Buffers must still be reclaimed even though the frame
		 * is dropped */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only when the device verdict is reliable */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1491
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the completion's page fragments directly to a napi-provided
 * skb (no copy) and feeds it to the GRO engine. Falls back to discarding
 * the buffers if napi has no skb available.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps as u16) so the first iteration's j++ lands
	 * on frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Slot j already references this page; drop the
			 * duplicate reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose checksum HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1547
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001548static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1549 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550{
Sathya Perla2e588f82011-03-11 02:49:26 +00001551 rxcp->pkt_size =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1553 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1554 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1555 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001556 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001557 rxcp->ip_csum =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1559 rxcp->l4_csum =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1561 rxcp->ipv6 =
1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1563 rxcp->rxq_idx =
1564 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1565 rxcp->num_rcvd =
1566 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1567 rxcp->pkt_type =
1568 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001569 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001570 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001571 if (rxcp->vlanf) {
1572 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001573 compl);
1574 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1575 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001576 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001577 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001578}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001580static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001582{
1583 rxcp->pkt_size =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001588 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001589 rxcp->ip_csum =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591 rxcp->l4_csum =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593 rxcp->ipv6 =
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595 rxcp->rxq_idx =
1596 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597 rxcp->num_rcvd =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599 rxcp->pkt_type =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001601 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001603 if (rxcp->vlanf) {
1604 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001605 compl);
1606 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001608 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001609 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001610 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1611 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001612}
1613
/* Pop the next valid Rx completion from rxo's CQ, parse it into the
 * per-rxo rxcp scratch descriptor and apply VLAN quirk fixups.
 * Returns NULL when no completion is pending. The CQ entry's valid bit
 * is cleared so it is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the compl body only after the valid bit has been observed */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer delivers the tag in CPU order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag unless the vlan is configured on the
		 * interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1656
Eric Dumazet1829b082011-03-01 05:48:12 +00001657static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001658{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001659 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001660
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001661 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001662 gfp |= __GFP_COMP;
1663 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001664}
1665
1666/*
1667 * Allocate a page, split it to fragments of size rx_frag_size and post as
1668 * receive buffers to BE
1669 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001670static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001671{
Sathya Perla3abcded2010-10-03 22:12:27 -07001672 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001673 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001674 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675 struct page *pagep = NULL;
1676 struct be_eth_rx_d *rxd;
1677 u64 page_dmaaddr = 0, frag_dmaaddr;
1678 u32 posted, page_offset = 0;
1679
Sathya Perla3abcded2010-10-03 22:12:27 -07001680 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1682 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001683 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001685 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686 break;
1687 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001688 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1689 0, adapter->big_page_size,
1690 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001691 page_info->page_offset = 0;
1692 } else {
1693 get_page(pagep);
1694 page_info->page_offset = page_offset + rx_frag_size;
1695 }
1696 page_offset = page_info->page_offset;
1697 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001698 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1700
1701 rxd = queue_head_node(rxq);
1702 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1703 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704
1705 /* Any space left in the current big page for another frag? */
1706 if ((page_offset + rx_frag_size + rx_frag_size) >
1707 adapter->big_page_size) {
1708 pagep = NULL;
1709 page_info->last_page_user = true;
1710 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001711
1712 prev_page_info = page_info;
1713 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001714 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715 }
1716 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001717 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718
1719 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001721 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001722 } else if (atomic_read(&rxq->used) == 0) {
1723 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001724 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001725 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001726}
1727
Sathya Perla5fb379e2009-06-18 00:02:59 +00001728static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001729{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001730 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1731
1732 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1733 return NULL;
1734
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001735 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001736 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1737
1738 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1739
1740 queue_tail_inc(tx_cq);
1741 return txcp;
1742}
1743
/* Reclaim the wrbs of one transmitted skb, walking the TX queue from its
 * tail up to @last_index (the wrb index reported by the completion).
 * Unmaps each fragment's DMA, frees the skb and returns the number of
 * wrbs consumed (including the header wrb) so the caller can adjust
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the slot of its header wrb (queue tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb maps the skb's linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1775
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001776/* Return the number of events in the event queue */
1777static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001778{
1779 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001780 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001781
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001782 do {
1783 eqe = queue_tail_node(&eqo->q);
1784 if (eqe->evt == 0)
1785 break;
1786
1787 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001788 eqe->evt = 0;
1789 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001790 queue_tail_inc(&eqo->q);
1791 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001792
1793 return num;
1794}
1795
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001796/* Leaves the EQ is disarmed state */
1797static void be_eq_clean(struct be_eq_obj *eqo)
1798{
1799 int num = events_get(eqo);
1800
1801 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1802}
1803
/* Drain an Rx completion queue during teardown and free all posted but
 * unused receive buffers. Must only be called after the HW RX queue has
 * been asked to flush.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is known bad */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* Re-arm so HW pushes out partially coalesced
			 * entries, then poll again */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 marks the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1854
/* Drain all TX queues during teardown: reap completions for up to ~200ms,
 * then forcibly free any posted skbs whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack reaped completions and release wrbs;
				 * counters are reset per-queue */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb since no
			 * completion supplies its end index */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1913
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001914static void be_evt_queues_destroy(struct be_adapter *adapter)
1915{
1916 struct be_eq_obj *eqo;
1917 int i;
1918
1919 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001920 if (eqo->q.created) {
1921 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001922 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001923 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001924 be_queue_free(adapter, &eqo->q);
1925 }
1926}
1927
/* Allocate and create one event queue per IRQ vector.
 * Initializes each EQ object's adapter back-pointer, budget, index and
 * adaptive-interrupt-coalescing (AIC) settings before issuing the
 * EQ-create FW command.
 * Returns 0 on success or the first failing command's status; partially
 * created queues are left for be_evt_queues_destroy() to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1955
Sathya Perla5fb379e2009-06-18 00:02:59 +00001956static void be_mcc_queues_destroy(struct be_adapter *adapter)
1957{
1958 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001959
Sathya Perla8788fdc2009-07-27 22:52:03 +00001960 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001961 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001962 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001963 be_queue_free(adapter, q);
1964
Sathya Perla8788fdc2009-07-27 22:52:03 +00001965 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001966 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001967 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001968 be_queue_free(adapter, q);
1969}
1970
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and then the MCC WRB queue on top
 * of it. On any failure the already-created resources are unwound in
 * reverse order via the goto ladder; returns 0 on success, -1 otherwise.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2003
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002004static void be_tx_queues_destroy(struct be_adapter *adapter)
2005{
2006 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002007 struct be_tx_obj *txo;
2008 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002009
Sathya Perla3c8def92011-06-12 20:01:58 +00002010 for_all_tx_queues(adapter, txo, i) {
2011 q = &txo->q;
2012 if (q->created)
2013 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2014 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015
Sathya Perla3c8def92011-06-12 20:01:58 +00002016 q = &txo->cq;
2017 if (q->created)
2018 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2019 be_queue_free(adapter, q);
2020 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021}
2022
Sathya Perladafc0fe2011-10-24 02:45:02 +00002023static int be_num_txqs_want(struct be_adapter *adapter)
2024{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002025 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2026 be_is_mc(adapter) ||
2027 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00002028 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002029 return 1;
2030 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002031 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002032}
2033
/* Allocate and create one completion queue per TX queue.
 * First fixes adapter->num_tx_qs and tells the stack the real TXQ count,
 * then binds each new CQ to an EQ (round-robin when there are fewer EQs
 * than TXQs). Returns 0 or the first failing status; partial state is
 * cleaned up by be_tx_queues_destroy().
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl_lock protects against concurrent queue-count changes */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
2066
/* Allocate and create the TX WRB queues (the CQs must already exist,
 * see be_tx_cqs_create()). Returns 0 or the first failing status.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2087
2088static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002089{
2090 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002091 struct be_rx_obj *rxo;
2092 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002093
Sathya Perla3abcded2010-10-03 22:12:27 -07002094 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002095 q = &rxo->cq;
2096 if (q->created)
2097 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2098 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002099 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002100}
2101
/* Allocate and create the RX completion queues: one RSS ring per IRQ
 * plus one default (non-RSS) ring, or a single ring when only one IRQ
 * is available. Each CQ is bound round-robin to an EQ. Returns 0 or the
 * first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2140
/* Legacy INTx interrupt handler (used only when MSI-x is unavailable;
 * only EQ0 is serviced in this mode). Counts pending events, kicks NAPI,
 * and acks the EQ without rearming it.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2172
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002173static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002174{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002175 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176
Sathya Perla0b545a62012-11-23 00:27:18 +00002177 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2178 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179 return IRQ_HANDLED;
2180}
2181
Sathya Perla2e588f82011-03-11 02:49:26 +00002182static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183{
Somnath Koture38b1702013-05-29 22:55:56 +00002184 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002185}
2186
/* Consume up to @budget RX completions from @rxo's CQ, passing good
 * packets to GRO or the regular RX path and discarding flush/partial/
 * mis-filtered completions. Acks processed entries and replenishes RX
 * buffers when the queue runs low. Returns the number of completions
 * consumed (NAPI work_done).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring before it runs dry */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2236
/* Consume up to @budget TX completions for @txo (subqueue @idx), free
 * the completed WRBs/skbs, and wake the netdev subqueue if it had been
 * stopped for lack of WRB space. Returns true when the CQ was drained
 * within budget (i.e. this TXQ is "done" for the poll cycle).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are updated under a seqcount for 32-bit readers */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002269
/* NAPI poll handler for one EQ: services all TX and RX queues mapped to
 * this EQ, plus MCC completions when this is the MCC EQ. Rearms the EQ
 * only when all work fit within @budget; otherwise acks events without
 * rearming so polling continues. Returns the work done (capped at
 * @budget to keep NAPI polling when any TXQ was not drained).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2308
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002309void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002310{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002311 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2312 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002313 u32 i;
2314
Sathya Perlad23e9462012-12-17 19:38:51 +00002315 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002316 return;
2317
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002318 if (lancer_chip(adapter)) {
2319 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2320 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2321 sliport_err1 = ioread32(adapter->db +
2322 SLIPORT_ERROR1_OFFSET);
2323 sliport_err2 = ioread32(adapter->db +
2324 SLIPORT_ERROR2_OFFSET);
2325 }
2326 } else {
2327 pci_read_config_dword(adapter->pdev,
2328 PCICFG_UE_STATUS_LOW, &ue_lo);
2329 pci_read_config_dword(adapter->pdev,
2330 PCICFG_UE_STATUS_HIGH, &ue_hi);
2331 pci_read_config_dword(adapter->pdev,
2332 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2333 pci_read_config_dword(adapter->pdev,
2334 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002335
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002336 ue_lo = (ue_lo & ~ue_lo_mask);
2337 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002338 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002339
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002340 /* On certain platforms BE hardware can indicate spurious UEs.
2341 * Allow the h/w to stop working completely in case of a real UE.
2342 * Hence not setting the hw_error for UE detection.
2343 */
2344 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002345 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002346 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002347 "Error detected in the card\n");
2348 }
2349
2350 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2351 dev_err(&adapter->pdev->dev,
2352 "ERR: sliport status 0x%x\n", sliport_status);
2353 dev_err(&adapter->pdev->dev,
2354 "ERR: sliport error1 0x%x\n", sliport_err1);
2355 dev_err(&adapter->pdev->dev,
2356 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002357 }
2358
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002359 if (ue_lo) {
2360 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2361 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002362 dev_err(&adapter->pdev->dev,
2363 "UE: %s bit set\n", ue_status_low_desc[i]);
2364 }
2365 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002366
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002367 if (ue_hi) {
2368 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2369 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002370 dev_err(&adapter->pdev->dev,
2371 "UE: %s bit set\n", ue_status_hi_desc[i]);
2372 }
2373 }
2374
2375}
2376
Sathya Perla8d56ff12009-11-22 22:02:26 +00002377static void be_msix_disable(struct be_adapter *adapter)
2378{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002379 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002380 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002381 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002382 }
2383}
2384
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002385static uint be_num_rss_want(struct be_adapter *adapter)
2386{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002387 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002388
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002389 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002390 (lancer_chip(adapter) ||
2391 (!sriov_want(adapter) && be_physfn(adapter)))) {
2392 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002393 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2394 }
2395 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002396}
2397
/* Enable MSI-x with as many vectors as the RSS and (optionally) RoCE
 * configurations want. If the full request fails but the h/w offers a
 * smaller count, retries with that count. On success, partitions the
 * granted vectors between NIC and RoCE. Returns 0 on success or when
 * falling back to INTx is possible (PF); returns the pci_enable_msix
 * error for VFs, where INTx is not supported.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors actually
		 * available; retry with that */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Split the granted vectors between the NIC and RoCE functions */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2449
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002450static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002451 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002452{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002453 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002454}
2455
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs already requested (walking back from i-1),
 * logs the error, disables MSI-x, and returns the request_irq status.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: release the IRQs acquired before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2479
/* Register interrupt handlers: MSI-x when enabled, falling back to a
 * shared INTx line (PF only; VFs must use MSI-x). Sets isr_registered
 * on success so be_irq_unregister() knows there is work to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2507
/* Free the IRQ(s) registered by be_irq_register(): the single shared
 * INTx line, or one MSI-x vector per event queue. No-op if nothing was
 * registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2530
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002531static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002532{
2533 struct be_queue_info *q;
2534 struct be_rx_obj *rxo;
2535 int i;
2536
2537 for_all_rx_queues(adapter, rxo, i) {
2538 q = &rxo->q;
2539 if (q->created) {
2540 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002541 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002542 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002543 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002544 }
2545}
2546
/* ndo_stop handler: quiesce the interface in a safe order —
 * RoCE first, then NAPI, async MCC, TX drain, RX teardown, per-EQ IRQ
 * synchronization/cleanup, and finally IRQ unregistration.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no handler is still running for this EQ before
		 * draining it */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2583
/* Allocate and create the RX WRB queues (the default queue first, as
 * the FW requires, then the RSS rings), program the 128-entry RSS
 * indirection table and hash flags when multiple rings exist, and post
 * the initial RX buffers. Returns 0 or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * ring ids (the default ring takes no RSS traffic) */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on Skyhawk/Lancer, not BE2/BE3 */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2640
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002641static int be_open(struct net_device *netdev)
2642{
2643 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002644 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002645 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002646 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002647 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002648 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002649
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002650 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002651 if (status)
2652 goto err;
2653
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002654 status = be_irq_register(adapter);
2655 if (status)
2656 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002657
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002658 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002659 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002660
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002661 for_all_tx_queues(adapter, txo, i)
2662 be_cq_notify(adapter, txo->cq.id, true, 0);
2663
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002664 be_async_mcc_enable(adapter);
2665
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002666 for_all_evt_queues(adapter, eqo, i) {
2667 napi_enable(&eqo->napi);
2668 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2669 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002670 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002671
Sathya Perla323ff712012-09-28 04:39:43 +00002672 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002673 if (!status)
2674 be_link_status_update(adapter, link_status);
2675
Sathya Perlafba87552013-05-08 02:05:50 +00002676 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002677 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002678 return 0;
2679err:
2680 be_close(adapter->netdev);
2681 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002682}
2683
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002684static int be_setup_wol(struct be_adapter *adapter, bool enable)
2685{
2686 struct be_dma_mem cmd;
2687 int status = 0;
2688 u8 mac[ETH_ALEN];
2689
2690 memset(mac, 0, ETH_ALEN);
2691
2692 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002693 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002694 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002695 if (cmd.va == NULL)
2696 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002697
2698 if (enable) {
2699 status = pci_write_config_dword(adapter->pdev,
2700 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2701 if (status) {
2702 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002703 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002704 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2705 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002706 return status;
2707 }
2708 status = be_cmd_enable_magic_wol(adapter,
2709 adapter->netdev->dev_addr, &cmd);
2710 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2711 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2712 } else {
2713 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2714 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2715 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2716 }
2717
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002718 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002719 return status;
2720}
2721
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002722/*
2723 * Generate a seed MAC address from the PF MAC Address using jhash.
2724 * MAC Address for VFs are assigned incrementally starting from the seed.
2725 * These addresses are programmed in the ASIC by the PF and the VF driver
2726 * queries for the MAC address during its probe.
2727 */
Sathya Perla4c876612013-02-03 20:30:11 +00002728static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002729{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002730 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002731 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002732 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002733 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002734
2735 be_vf_eth_addr_generate(adapter, mac);
2736
Sathya Perla11ac75e2011-12-13 00:58:50 +00002737 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002738 if (lancer_chip(adapter)) {
2739 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2740 } else {
2741 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002742 vf_cfg->if_handle,
2743 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002744 }
2745
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002746 if (status)
2747 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002748 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002749 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002750 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002751
2752 mac[5] += 1;
2753 }
2754 return status;
2755}
2756
Sathya Perla4c876612013-02-03 20:30:11 +00002757static int be_vfs_mac_query(struct be_adapter *adapter)
2758{
2759 int status, vf;
2760 u8 mac[ETH_ALEN];
2761 struct be_vf_cfg *vf_cfg;
2762 bool active;
2763
2764 for_all_vfs(adapter, vf_cfg, vf) {
2765 be_cmd_get_mac_from_list(adapter, mac, &active,
2766 &vf_cfg->pmac_id, 0);
2767
2768 status = be_cmd_mac_addr_query(adapter, mac, false,
2769 vf_cfg->if_handle, 0);
2770 if (status)
2771 return status;
2772 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2773 }
2774 return 0;
2775}
2776
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002777static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002778{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002779 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002780 u32 vf;
2781
Sathya Perla257a3fe2013-06-14 15:54:51 +05302782 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002783 dev_warn(&adapter->pdev->dev,
2784 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002785 goto done;
2786 }
2787
Sathya Perlab4c1df92013-05-08 02:05:47 +00002788 pci_disable_sriov(adapter->pdev);
2789
Sathya Perla11ac75e2011-12-13 00:58:50 +00002790 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002791 if (lancer_chip(adapter))
2792 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2793 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002794 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2795 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002796
Sathya Perla11ac75e2011-12-13 00:58:50 +00002797 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2798 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002799done:
2800 kfree(adapter->vf_cfg);
2801 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002802}
2803
Sathya Perlaa54769f2011-10-24 02:45:00 +00002804static int be_clear(struct be_adapter *adapter)
2805{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002806 int i = 1;
2807
Sathya Perla191eb752012-02-23 18:50:13 +00002808 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2809 cancel_delayed_work_sync(&adapter->work);
2810 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2811 }
2812
Sathya Perla11ac75e2011-12-13 00:58:50 +00002813 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002814 be_vf_clear(adapter);
2815
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002816 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2817 be_cmd_pmac_del(adapter, adapter->if_handle,
2818 adapter->pmac_id[i], 0);
2819
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002820 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002821
2822 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002823 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002824 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002825 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002826
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002827 kfree(adapter->pmac_id);
2828 adapter->pmac_id = NULL;
2829
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002830 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002831 return 0;
2832}
2833
Sathya Perla4c876612013-02-03 20:30:11 +00002834static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002835{
Sathya Perla4c876612013-02-03 20:30:11 +00002836 struct be_vf_cfg *vf_cfg;
2837 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002838 int status;
2839
Sathya Perla4c876612013-02-03 20:30:11 +00002840 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2841 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002842
Sathya Perla4c876612013-02-03 20:30:11 +00002843 for_all_vfs(adapter, vf_cfg, vf) {
2844 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002845 be_cmd_get_profile_config(adapter, &cap_flags,
2846 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002847
2848 /* If a FW profile exists, then cap_flags are updated */
2849 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2850 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2851 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2852 &vf_cfg->if_handle, vf + 1);
2853 if (status)
2854 goto err;
2855 }
2856err:
2857 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002858}
2859
Sathya Perla39f1d942012-05-08 19:41:24 +00002860static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002861{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002862 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002863 int vf;
2864
Sathya Perla39f1d942012-05-08 19:41:24 +00002865 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2866 GFP_KERNEL);
2867 if (!adapter->vf_cfg)
2868 return -ENOMEM;
2869
Sathya Perla11ac75e2011-12-13 00:58:50 +00002870 for_all_vfs(adapter, vf_cfg, vf) {
2871 vf_cfg->if_handle = -1;
2872 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002873 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002874 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002875}
2876
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002877static int be_vf_setup(struct be_adapter *adapter)
2878{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002879 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002880 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002881 int status, old_vfs, vf;
2882 struct device *dev = &adapter->pdev->dev;
Sathya Perla04a06022013-07-23 15:25:00 +05302883 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002884
Sathya Perla257a3fe2013-06-14 15:54:51 +05302885 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00002886 if (old_vfs) {
2887 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2888 if (old_vfs != num_vfs)
2889 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2890 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002891 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002892 if (num_vfs > adapter->dev_num_vfs)
2893 dev_info(dev, "Device supports %d VFs and not %d\n",
2894 adapter->dev_num_vfs, num_vfs);
2895 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
Sathya Perlab4c1df92013-05-08 02:05:47 +00002896 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00002897 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00002898 }
2899
2900 status = be_vf_setup_init(adapter);
2901 if (status)
2902 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002903
Sathya Perla4c876612013-02-03 20:30:11 +00002904 if (old_vfs) {
2905 for_all_vfs(adapter, vf_cfg, vf) {
2906 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2907 if (status)
2908 goto err;
2909 }
2910 } else {
2911 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002912 if (status)
2913 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002914 }
2915
Sathya Perla4c876612013-02-03 20:30:11 +00002916 if (old_vfs) {
2917 status = be_vfs_mac_query(adapter);
2918 if (status)
2919 goto err;
2920 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002921 status = be_vf_eth_addr_config(adapter);
2922 if (status)
2923 goto err;
2924 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002925
Sathya Perla11ac75e2011-12-13 00:58:50 +00002926 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05302927 /* Allow VFs to programs MAC/VLAN filters */
2928 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2929 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2930 status = be_cmd_set_fn_privileges(adapter,
2931 privileges |
2932 BE_PRIV_FILTMGMT,
2933 vf + 1);
2934 if (!status)
2935 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2936 vf);
2937 }
2938
Sathya Perla4c876612013-02-03 20:30:11 +00002939 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2940 * Allow full available bandwidth
2941 */
2942 if (BE3_chip(adapter) && !old_vfs)
2943 be_cmd_set_qos(adapter, 1000, vf+1);
2944
2945 status = be_cmd_link_status_query(adapter, &lnk_speed,
2946 NULL, vf + 1);
2947 if (!status)
2948 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002949
2950 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002951 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002952 if (status)
2953 goto err;
2954 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002955
2956 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002957 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00002958
2959 if (!old_vfs) {
2960 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2961 if (status) {
2962 dev_err(dev, "SRIOV enable failed\n");
2963 adapter->num_vfs = 0;
2964 goto err;
2965 }
2966 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002967 return 0;
2968err:
Sathya Perla4c876612013-02-03 20:30:11 +00002969 dev_err(dev, "VF setup failed\n");
2970 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002971 return status;
2972}
2973
Sathya Perla30128032011-11-10 19:17:57 +00002974static void be_setup_init(struct be_adapter *adapter)
2975{
2976 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002977 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002978 adapter->if_handle = -1;
2979 adapter->be3_native = false;
2980 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002981 if (be_physfn(adapter))
2982 adapter->cmd_privileges = MAX_PRIVILEGES;
2983 else
2984 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002985}
2986
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002987static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2988 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002989{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002990 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002991
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002992 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2993 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2994 if (!lancer_chip(adapter) && !be_physfn(adapter))
2995 *active_mac = true;
2996 else
2997 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002998
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002999 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003000 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003001
3002 if (lancer_chip(adapter)) {
3003 status = be_cmd_get_mac_from_list(adapter, mac,
3004 active_mac, pmac_id, 0);
3005 if (*active_mac) {
Sathya Perla5ee49792012-09-28 04:39:41 +00003006 status = be_cmd_mac_addr_query(adapter, mac, false,
3007 if_handle, *pmac_id);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003008 }
3009 } else if (be_physfn(adapter)) {
3010 /* For BE3, for PF get permanent MAC */
Sathya Perla5ee49792012-09-28 04:39:41 +00003011 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003012 *active_mac = false;
3013 } else {
3014 /* For BE3, for VF get soft MAC assigned by PF*/
Sathya Perla5ee49792012-09-28 04:39:41 +00003015 status = be_cmd_mac_addr_query(adapter, mac, false,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003016 if_handle, 0);
3017 *active_mac = true;
3018 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003019 return status;
3020}
3021
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003022static void be_get_resources(struct be_adapter *adapter)
3023{
Sathya Perla4c876612013-02-03 20:30:11 +00003024 u16 dev_num_vfs;
3025 int pos, status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003026 bool profile_present = false;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003027 u16 txq_count = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003028
Sathya Perla4c876612013-02-03 20:30:11 +00003029 if (!BEx_chip(adapter)) {
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003030 status = be_cmd_get_func_config(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003031 if (!status)
3032 profile_present = true;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003033 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3034 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003035 }
3036
3037 if (profile_present) {
3038 /* Sanity fixes for Lancer */
3039 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3040 BE_UC_PMAC_COUNT);
3041 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3042 BE_NUM_VLANS_SUPPORTED);
3043 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3044 BE_MAX_MC);
3045 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3046 MAX_TX_QS);
3047 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3048 BE3_MAX_RSS_QS);
3049 adapter->max_event_queues = min_t(u16,
3050 adapter->max_event_queues,
3051 BE3_MAX_RSS_QS);
3052
3053 if (adapter->max_rss_queues &&
3054 adapter->max_rss_queues == adapter->max_rx_queues)
3055 adapter->max_rss_queues -= 1;
3056
3057 if (adapter->max_event_queues < adapter->max_rss_queues)
3058 adapter->max_rss_queues = adapter->max_event_queues;
3059
3060 } else {
3061 if (be_physfn(adapter))
3062 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3063 else
3064 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3065
3066 if (adapter->function_mode & FLEX10_MODE)
3067 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3068 else
3069 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3070
3071 adapter->max_mcast_mac = BE_MAX_MC;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003072 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3073 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3074 MAX_TX_QS);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003075 adapter->max_rss_queues = (adapter->be3_native) ?
3076 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3077 adapter->max_event_queues = BE3_MAX_RSS_QS;
3078
3079 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3080 BE_IF_FLAGS_BROADCAST |
3081 BE_IF_FLAGS_MULTICAST |
3082 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3083 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3084 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3085 BE_IF_FLAGS_PROMISCUOUS;
3086
3087 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3088 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3089 }
Sathya Perla4c876612013-02-03 20:30:11 +00003090
3091 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3092 if (pos) {
3093 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3094 &dev_num_vfs);
3095 if (BE3_chip(adapter))
3096 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3097 adapter->dev_num_vfs = dev_num_vfs;
3098 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003099}
3100
Sathya Perla39f1d942012-05-08 19:41:24 +00003101/* Routine to query per function resource limits */
3102static int be_get_config(struct be_adapter *adapter)
3103{
Sathya Perla4c876612013-02-03 20:30:11 +00003104 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003105
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003106 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3107 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003108 &adapter->function_caps,
3109 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003110 if (status)
3111 goto err;
3112
3113 be_get_resources(adapter);
3114
3115 /* primary mac needs 1 pmac entry */
3116 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3117 sizeof(u32), GFP_KERNEL);
3118 if (!adapter->pmac_id) {
3119 status = -ENOMEM;
3120 goto err;
3121 }
3122
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003123err:
3124 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003125}
3126
Sathya Perla5fb379e2009-06-18 00:02:59 +00003127static int be_setup(struct be_adapter *adapter)
3128{
Sathya Perla39f1d942012-05-08 19:41:24 +00003129 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003130 u32 en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003131 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003132 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003133 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003134 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003135
Sathya Perla30128032011-11-10 19:17:57 +00003136 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003137
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003138 if (!lancer_chip(adapter))
3139 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003140
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003141 status = be_get_config(adapter);
3142 if (status)
3143 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003144
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003145 status = be_msix_enable(adapter);
3146 if (status)
3147 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003148
3149 status = be_evt_queues_create(adapter);
3150 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003151 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003152
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003153 status = be_tx_cqs_create(adapter);
3154 if (status)
3155 goto err;
3156
3157 status = be_rx_cqs_create(adapter);
3158 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003159 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003160
Sathya Perla5fb379e2009-06-18 00:02:59 +00003161 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003162 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003163 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003164
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003165 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3166 /* In UMC mode FW does not return right privileges.
3167 * Override with correct privilege equivalent to PF.
3168 */
3169 if (be_is_mc(adapter))
3170 adapter->cmd_privileges = MAX_PRIVILEGES;
3171
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003172 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3173 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00003174
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003175 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003176 en_flags |= BE_IF_FLAGS_RSS;
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003177
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003178 en_flags = en_flags & adapter->if_cap_flags;
Padmanabh Ratnakar0b13fb42012-07-18 02:51:58 +00003179
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003180 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003181 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003182 if (status != 0)
3183 goto err;
3184
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003185 memset(mac, 0, ETH_ALEN);
3186 active_mac = false;
3187 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3188 &active_mac, &adapter->pmac_id[0]);
3189 if (status != 0)
3190 goto err;
3191
3192 if (!active_mac) {
3193 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3194 &adapter->pmac_id[0], 0);
3195 if (status != 0)
3196 goto err;
3197 }
3198
3199 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3200 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3201 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003202 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00003203
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003204 status = be_tx_qs_create(adapter);
3205 if (status)
3206 goto err;
3207
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003208 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003209
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003210 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003211 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003212
3213 be_set_rx_mode(adapter->netdev);
3214
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003215 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003216
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003217 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3218 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003219 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003220
Sathya Perlab4c1df92013-05-08 02:05:47 +00003221 if (be_physfn(adapter)) {
Sathya Perla39f1d942012-05-08 19:41:24 +00003222 if (adapter->dev_num_vfs)
3223 be_vf_setup(adapter);
3224 else
3225 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003226 }
3227
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003228 status = be_cmd_get_phy_info(adapter);
3229 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003230 adapter->phy.fc_autoneg = 1;
3231
Sathya Perla191eb752012-02-23 18:50:13 +00003232 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3233 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003234 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003235err:
3236 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003237 return status;
3238}
3239
Ivan Vecera66268732011-12-08 01:31:21 +00003240#ifdef CONFIG_NET_POLL_CONTROLLER
3241static void be_netpoll(struct net_device *netdev)
3242{
3243 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003244 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003245 int i;
3246
Sathya Perlae49cc342012-11-27 19:50:02 +00003247 for_all_evt_queues(adapter, eqo, i) {
3248 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3249 napi_schedule(&eqo->napi);
3250 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003251
3252 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003253}
3254#endif
3255
Ajit Khaparde84517482009-09-04 03:12:16 +00003256#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003257char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3258
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003259static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003260 const u8 *p, u32 img_start, int image_size,
3261 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003262{
3263 u32 crc_offset;
3264 u8 flashed_crc[4];
3265 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003266
3267 crc_offset = hdr_size + img_start + image_size - 4;
3268
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003269 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003270
3271 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003272 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003273 if (status) {
3274 dev_err(&adapter->pdev->dev,
3275 "could not get crc from flash, not flashing redboot\n");
3276 return false;
3277 }
3278
3279 /*update redboot only if crc does not match*/
3280 if (!memcmp(flashed_crc, p, 4))
3281 return false;
3282 else
3283 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003284}
3285
Sathya Perla306f1342011-08-02 19:57:45 +00003286static bool phy_flashing_required(struct be_adapter *adapter)
3287{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003288 return (adapter->phy.phy_type == TN_8022 &&
3289 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003290}
3291
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003292static bool is_comp_in_ufi(struct be_adapter *adapter,
3293 struct flash_section_info *fsec, int type)
3294{
3295 int i = 0, img_type = 0;
3296 struct flash_section_info_g2 *fsec_g2 = NULL;
3297
Sathya Perlaca34fe32012-11-06 17:48:56 +00003298 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003299 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3300
3301 for (i = 0; i < MAX_FLASH_COMP; i++) {
3302 if (fsec_g2)
3303 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3304 else
3305 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3306
3307 if (img_type == type)
3308 return true;
3309 }
3310 return false;
3311
3312}
3313
3314struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3315 int header_size,
3316 const struct firmware *fw)
3317{
3318 struct flash_section_info *fsec = NULL;
3319 const u8 *p = fw->data;
3320
3321 p += header_size;
3322 while (p < (fw->data + fw->size)) {
3323 fsec = (struct flash_section_info *)p;
3324 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3325 return fsec;
3326 p += 32;
3327 }
3328 return NULL;
3329}
3330
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003331static int be_flash(struct be_adapter *adapter, const u8 *img,
3332 struct be_dma_mem *flash_cmd, int optype, int img_size)
3333{
3334 u32 total_bytes = 0, flash_op, num_bytes = 0;
3335 int status = 0;
3336 struct be_cmd_write_flashrom *req = flash_cmd->va;
3337
3338 total_bytes = img_size;
3339 while (total_bytes) {
3340 num_bytes = min_t(u32, 32*1024, total_bytes);
3341
3342 total_bytes -= num_bytes;
3343
3344 if (!total_bytes) {
3345 if (optype == OPTYPE_PHY_FW)
3346 flash_op = FLASHROM_OPER_PHY_FLASH;
3347 else
3348 flash_op = FLASHROM_OPER_FLASH;
3349 } else {
3350 if (optype == OPTYPE_PHY_FW)
3351 flash_op = FLASHROM_OPER_PHY_SAVE;
3352 else
3353 flash_op = FLASHROM_OPER_SAVE;
3354 }
3355
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003356 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003357 img += num_bytes;
3358 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3359 flash_op, num_bytes);
3360 if (status) {
3361 if (status == ILLEGAL_IOCTL_REQ &&
3362 optype == OPTYPE_PHY_FW)
3363 break;
3364 dev_err(&adapter->pdev->dev,
3365 "cmd to write to flash rom failed.\n");
3366 return status;
3367 }
3368 }
3369 return 0;
3370}
3371
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003372/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003373static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003374 const struct firmware *fw,
3375 struct be_dma_mem *flash_cmd,
3376 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003377
Ajit Khaparde84517482009-09-04 03:12:16 +00003378{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003379 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003380 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003381 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003382 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003383 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003384 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003385
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003386 struct flash_comp gen3_flash_types[] = {
3387 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3388 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3389 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3390 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3391 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3392 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3393 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3394 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3395 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3396 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3397 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3398 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3399 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3400 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3401 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3402 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3403 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3404 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3405 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3406 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003407 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003408
3409 struct flash_comp gen2_flash_types[] = {
3410 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3411 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3412 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3413 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3414 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3415 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3416 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3417 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3418 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3419 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3420 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3421 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3422 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3423 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3424 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3425 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003426 };
3427
Sathya Perlaca34fe32012-11-06 17:48:56 +00003428 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003429 pflashcomp = gen3_flash_types;
3430 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003431 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003432 } else {
3433 pflashcomp = gen2_flash_types;
3434 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003435 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003436 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003437
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003438 /* Get flash section info*/
3439 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3440 if (!fsec) {
3441 dev_err(&adapter->pdev->dev,
3442 "Invalid Cookie. UFI corrupted ?\n");
3443 return -1;
3444 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003445 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003446 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003447 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003448
3449 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3450 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3451 continue;
3452
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003453 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3454 !phy_flashing_required(adapter))
3455 continue;
3456
3457 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3458 redboot = be_flash_redboot(adapter, fw->data,
3459 pflashcomp[i].offset, pflashcomp[i].size,
3460 filehdr_size + img_hdrs_size);
3461 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003462 continue;
3463 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003464
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003465 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003466 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003467 if (p + pflashcomp[i].size > fw->data + fw->size)
3468 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003469
3470 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3471 pflashcomp[i].size);
3472 if (status) {
3473 dev_err(&adapter->pdev->dev,
3474 "Flashing section type %d failed.\n",
3475 pflashcomp[i].img_type);
3476 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003477 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003478 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003479 return 0;
3480}
3481
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003482static int be_flash_skyhawk(struct be_adapter *adapter,
3483 const struct firmware *fw,
3484 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003485{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003486 int status = 0, i, filehdr_size = 0;
3487 int img_offset, img_size, img_optype, redboot;
3488 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3489 const u8 *p = fw->data;
3490 struct flash_section_info *fsec = NULL;
3491
3492 filehdr_size = sizeof(struct flash_file_hdr_g3);
3493 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3494 if (!fsec) {
3495 dev_err(&adapter->pdev->dev,
3496 "Invalid Cookie. UFI corrupted ?\n");
3497 return -1;
3498 }
3499
3500 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3501 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3502 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3503
3504 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3505 case IMAGE_FIRMWARE_iSCSI:
3506 img_optype = OPTYPE_ISCSI_ACTIVE;
3507 break;
3508 case IMAGE_BOOT_CODE:
3509 img_optype = OPTYPE_REDBOOT;
3510 break;
3511 case IMAGE_OPTION_ROM_ISCSI:
3512 img_optype = OPTYPE_BIOS;
3513 break;
3514 case IMAGE_OPTION_ROM_PXE:
3515 img_optype = OPTYPE_PXE_BIOS;
3516 break;
3517 case IMAGE_OPTION_ROM_FCoE:
3518 img_optype = OPTYPE_FCOE_BIOS;
3519 break;
3520 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3521 img_optype = OPTYPE_ISCSI_BACKUP;
3522 break;
3523 case IMAGE_NCSI:
3524 img_optype = OPTYPE_NCSI_FW;
3525 break;
3526 default:
3527 continue;
3528 }
3529
3530 if (img_optype == OPTYPE_REDBOOT) {
3531 redboot = be_flash_redboot(adapter, fw->data,
3532 img_offset, img_size,
3533 filehdr_size + img_hdrs_size);
3534 if (!redboot)
3535 continue;
3536 }
3537
3538 p = fw->data;
3539 p += filehdr_size + img_offset + img_hdrs_size;
3540 if (p + img_size > fw->data + fw->size)
3541 return -1;
3542
3543 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3544 if (status) {
3545 dev_err(&adapter->pdev->dev,
3546 "Flashing section type %d failed.\n",
3547 fsec->fsec_entry[i].type);
3548 return status;
3549 }
3550 }
3551 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003552}
3553
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003554static int lancer_fw_download(struct be_adapter *adapter,
3555 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003556{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003557#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3558#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3559 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003560 const u8 *data_ptr = NULL;
3561 u8 *dest_image_ptr = NULL;
3562 size_t image_size = 0;
3563 u32 chunk_size = 0;
3564 u32 data_written = 0;
3565 u32 offset = 0;
3566 int status = 0;
3567 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003568 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003569
3570 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3571 dev_err(&adapter->pdev->dev,
3572 "FW Image not properly aligned. "
3573 "Length must be 4 byte aligned.\n");
3574 status = -EINVAL;
3575 goto lancer_fw_exit;
3576 }
3577
3578 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3579 + LANCER_FW_DOWNLOAD_CHUNK;
3580 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003581 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003582 if (!flash_cmd.va) {
3583 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003584 goto lancer_fw_exit;
3585 }
3586
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003587 dest_image_ptr = flash_cmd.va +
3588 sizeof(struct lancer_cmd_req_write_object);
3589 image_size = fw->size;
3590 data_ptr = fw->data;
3591
3592 while (image_size) {
3593 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3594
3595 /* Copy the image chunk content. */
3596 memcpy(dest_image_ptr, data_ptr, chunk_size);
3597
3598 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003599 chunk_size, offset,
3600 LANCER_FW_DOWNLOAD_LOCATION,
3601 &data_written, &change_status,
3602 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003603 if (status)
3604 break;
3605
3606 offset += data_written;
3607 data_ptr += data_written;
3608 image_size -= data_written;
3609 }
3610
3611 if (!status) {
3612 /* Commit the FW written */
3613 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003614 0, offset,
3615 LANCER_FW_DOWNLOAD_LOCATION,
3616 &data_written, &change_status,
3617 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003618 }
3619
3620 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3621 flash_cmd.dma);
3622 if (status) {
3623 dev_err(&adapter->pdev->dev,
3624 "Firmware load error. "
3625 "Status code: 0x%x Additional Status: 0x%x\n",
3626 status, add_status);
3627 goto lancer_fw_exit;
3628 }
3629
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003630 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur5c510812013-05-30 02:52:23 +00003631 status = lancer_physdev_ctrl(adapter,
3632 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003633 if (status) {
3634 dev_err(&adapter->pdev->dev,
3635 "Adapter busy for FW reset.\n"
3636 "New FW will not be active.\n");
3637 goto lancer_fw_exit;
3638 }
3639 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3640 dev_err(&adapter->pdev->dev,
3641 "System reboot required for new FW"
3642 " to be active\n");
3643 }
3644
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003645 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3646lancer_fw_exit:
3647 return status;
3648}
3649
Sathya Perlaca34fe32012-11-06 17:48:56 +00003650#define UFI_TYPE2 2
3651#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003652#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003653#define UFI_TYPE4 4
3654static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003655 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003656{
3657 if (fhdr == NULL)
3658 goto be_get_ufi_exit;
3659
Sathya Perlaca34fe32012-11-06 17:48:56 +00003660 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3661 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003662 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3663 if (fhdr->asic_type_rev == 0x10)
3664 return UFI_TYPE3R;
3665 else
3666 return UFI_TYPE3;
3667 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003668 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003669
3670be_get_ufi_exit:
3671 dev_err(&adapter->pdev->dev,
3672 "UFI and Interface are not compatible for flashing\n");
3673 return -1;
3674}
3675
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003676static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3677{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003678 struct flash_file_hdr_g3 *fhdr3;
3679 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003680 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003681 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003682 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003683
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003684 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003685 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3686 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003687 if (!flash_cmd.va) {
3688 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003689 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003690 }
3691
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003692 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003693 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003694
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003695 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003696
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003697 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3698 for (i = 0; i < num_imgs; i++) {
3699 img_hdr_ptr = (struct image_hdr *)(fw->data +
3700 (sizeof(struct flash_file_hdr_g3) +
3701 i * sizeof(struct image_hdr)));
3702 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003703 switch (ufi_type) {
3704 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003705 status = be_flash_skyhawk(adapter, fw,
3706 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003707 break;
3708 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003709 status = be_flash_BEx(adapter, fw, &flash_cmd,
3710 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003711 break;
3712 case UFI_TYPE3:
3713 /* Do not flash this ufi on BE3-R cards */
3714 if (adapter->asic_rev < 0x10)
3715 status = be_flash_BEx(adapter, fw,
3716 &flash_cmd,
3717 num_imgs);
3718 else {
3719 status = -1;
3720 dev_err(&adapter->pdev->dev,
3721 "Can't load BE3 UFI on BE3R\n");
3722 }
3723 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003724 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003725 }
3726
Sathya Perlaca34fe32012-11-06 17:48:56 +00003727 if (ufi_type == UFI_TYPE2)
3728 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003729 else if (ufi_type == -1)
3730 status = -1;
3731
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003732 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3733 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003734 if (status) {
3735 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003736 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003737 }
3738
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003739 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003740
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003741be_fw_exit:
3742 return status;
3743}
3744
3745int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3746{
3747 const struct firmware *fw;
3748 int status;
3749
3750 if (!netif_running(adapter->netdev)) {
3751 dev_err(&adapter->pdev->dev,
3752 "Firmware load not allowed (interface is down)\n");
3753 return -1;
3754 }
3755
3756 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3757 if (status)
3758 goto fw_exit;
3759
3760 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3761
3762 if (lancer_chip(adapter))
3763 status = lancer_fw_download(adapter, fw);
3764 else
3765 status = be_fw_download(adapter, fw);
3766
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003767 if (!status)
3768 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3769 adapter->fw_on_flash);
3770
Ajit Khaparde84517482009-09-04 03:12:16 +00003771fw_exit:
3772 release_firmware(fw);
3773 return status;
3774}
3775
stephen hemmingere5686ad2012-01-05 19:10:25 +00003776static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003777 .ndo_open = be_open,
3778 .ndo_stop = be_close,
3779 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003780 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003781 .ndo_set_mac_address = be_mac_addr_set,
3782 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003783 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003784 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003785 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3786 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003787 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003788 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003789 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003790 .ndo_get_vf_config = be_get_vf_config,
3791#ifdef CONFIG_NET_POLL_CONTROLLER
3792 .ndo_poll_controller = be_netpoll,
3793#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003794};
3795
/* One-time netdev setup: advertise offload features, install the netdev
 * and ethtool operation tables, and register one NAPI context per event
 * queue.  Called during probe before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads: scatter-gather, TSO, checksum,
	 * VLAN tag insertion; RX hashing only with multiple RX queues */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* enabled-by-default = everything above plus VLAN strip/filter
	 * (which are not user-toggleable here) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* cap GSO so the frame after header addition fits 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3827
3828static void be_unmap_pci_bars(struct be_adapter *adapter)
3829{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003830 if (adapter->csr)
3831 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003832 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003833 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003834}
3835
Sathya Perlace66f782012-11-06 17:48:58 +00003836static int db_bar(struct be_adapter *adapter)
3837{
3838 if (lancer_chip(adapter) || !be_physfn(adapter))
3839 return 0;
3840 else
3841 return 4;
3842}
3843
3844static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003845{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003846 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003847 adapter->roce_db.size = 4096;
3848 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3849 db_bar(adapter));
3850 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3851 db_bar(adapter));
3852 }
Parav Pandit045508a2012-03-26 14:27:13 +00003853 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003854}
3855
/* Map the PCI BARs used by the driver: the CSR BAR (BAR 2, only on
 * BE2/BE3 physical functions), the doorbell BAR (number depends on
 * chip/function, see db_bar()), and the RoCE doorbell window on Skyhawk.
 * Also latches the SLI interface type from config space.
 * Returns 0 on success, -ENOMEM when a mapping fails (any earlier
 * mapping is torn down via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* unmaps the CSR BAR if it was mapped above */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3883
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003884static void be_ctrl_cleanup(struct be_adapter *adapter)
3885{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003886 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003887
3888 be_unmap_pci_bars(adapter);
3889
3890 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003891 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3892 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003893
Sathya Perla5b8821b2011-08-02 19:57:44 +00003894 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003895 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003896 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3897 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003898}
3899
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003900static int be_ctrl_init(struct be_adapter *adapter)
3901{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003902 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3903 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003904 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003905 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003906 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003907
Sathya Perlace66f782012-11-06 17:48:58 +00003908 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3909 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3910 SLI_INTF_FAMILY_SHIFT;
3911 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3912
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003913 status = be_map_pci_bars(adapter);
3914 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003915 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003916
3917 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003918 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3919 mbox_mem_alloc->size,
3920 &mbox_mem_alloc->dma,
3921 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003922 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003923 status = -ENOMEM;
3924 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003925 }
3926 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3927 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3928 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3929 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003930
Sathya Perla5b8821b2011-08-02 19:57:44 +00003931 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3932 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
Joe Perches1f9061d22013-03-15 07:23:58 +00003933 &rx_filter->dma,
3934 GFP_KERNEL | __GFP_ZERO);
Sathya Perla5b8821b2011-08-02 19:57:44 +00003935 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003936 status = -ENOMEM;
3937 goto free_mbox;
3938 }
Joe Perches1f9061d22013-03-15 07:23:58 +00003939
Ivan Vecera29849612010-12-14 05:43:19 +00003940 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003941 spin_lock_init(&adapter->mcc_lock);
3942 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003943
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003944 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003945 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003946 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003947
3948free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003949 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3950 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003951
3952unmap_pci_bars:
3953 be_unmap_pci_bars(adapter);
3954
3955done:
3956 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003957}
3958
3959static void be_stats_cleanup(struct be_adapter *adapter)
3960{
Sathya Perla3abcded2010-10-03 22:12:27 -07003961 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003962
3963 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003964 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3965 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003966}
3967
3968static int be_stats_init(struct be_adapter *adapter)
3969{
Sathya Perla3abcded2010-10-03 22:12:27 -07003970 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003971
Sathya Perlaca34fe32012-11-06 17:48:56 +00003972 if (lancer_chip(adapter))
3973 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3974 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003975 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003976 else
3977 /* BE3 and Skyhawk */
3978 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3979
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003980 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003981 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003982 if (cmd->va == NULL)
3983 return -1;
3984 return 0;
3985}
3986
Bill Pemberton3bc6b062012-12-03 09:23:09 -05003987static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003988{
3989 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003990
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003991 if (!adapter)
3992 return;
3993
Parav Pandit045508a2012-03-26 14:27:13 +00003994 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00003995 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00003996
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003997 cancel_delayed_work_sync(&adapter->func_recovery_work);
3998
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003999 unregister_netdev(adapter->netdev);
4000
Sathya Perla5fb379e2009-06-18 00:02:59 +00004001 be_clear(adapter);
4002
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004003 /* tell fw we're done with firing cmds */
4004 be_cmd_fw_clean(adapter);
4005
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004006 be_stats_cleanup(adapter);
4007
4008 be_ctrl_cleanup(adapter);
4009
Sathya Perlad6b6d982012-09-05 01:56:48 +00004010 pci_disable_pcie_error_reporting(pdev);
4011
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004012 pci_set_drvdata(pdev, NULL);
4013 pci_release_regions(pdev);
4014 pci_disable_device(pdev);
4015
4016 free_netdev(adapter->netdev);
4017}
4018
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004019bool be_is_wol_supported(struct be_adapter *adapter)
4020{
4021 return ((adapter->wol_cap & BE_WOL_CAP) &&
4022 !be_is_wol_excluded(adapter)) ? true : false;
4023}
4024
/* Query the firmware's UART trace level via the extended FAT capabilities
 * command. Returns the dbg_lvl configured for MODE_UART on module 0, or 0
 * if the query fails, memory allocation fails, or this is a Lancer chip
 * (which does not support this command path).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* Lancer does not use this FAT-capabilities mechanism */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	/* DMA-coherent buffer for the firmware response */
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Response payload follows the common command response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Scan module 0's trace levels for the UART mode entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004061
/* Fetch one-time configuration from the firmware at probe time:
 * controller attributes, WoL capability, die-temperature poll frequency
 * and the initial driver message level. Returns 0 on success or the
 * error from be_cmd_get_cntl_attributes().
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Mirror the firmware's UART log level into netif msg_enable */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
4090
/* Attempt full error recovery of a Lancer function: wait for the chip to
 * report ready, tear down and rebuild the driver state, and reopen the
 * netdev if it was running. Returns 0 on success; -EAGAIN means firmware
 * resource provisioning is still pending and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Chip must reach the ready state before we touch it */
	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	/* Destroy all queues/IRQs before clearing error flags and
	 * re-running setup */
	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}
4127
/* Periodic (1s) work item that polls for hardware errors and, on Lancer
 * chips, drives recovery. The netdev is detached under rtnl_lock before
 * recovery and re-attached only if recovery succeeds. Reschedules itself
 * unless recovery failed with an error other than -EAGAIN (provisioning
 * pending), in which case further attempts are abandoned.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the device
		 * while we rebuild it */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4154
/* Periodic (1s) housekeeping work item: reaps MCC completions while the
 * interface is down, issues the stats and die-temperature firmware
 * commands, replenishes starved RX rings and updates EQ delay settings.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* BH disabled to match the context be_process_mcc is
		 * normally called from — TODO(review): confirm */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats command only if the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll temperature only every be_get_temp_freq iterations */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Refill any RX ring that ran out of buffers */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4197
Sathya Perla257a3fe2013-06-14 15:54:51 +05304198/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004199static bool be_reset_required(struct be_adapter *adapter)
4200{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304201 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004202}
4203
Sathya Perlad3791422012-09-28 04:39:44 +00004204static char *mc_name(struct be_adapter *adapter)
4205{
4206 if (adapter->function_mode & FLEX10_MODE)
4207 return "FLEX10";
4208 else if (adapter->function_mode & VNIC_MODE)
4209 return "vNIC";
4210 else if (adapter->function_mode & UMC_ENABLED)
4211 return "UMC";
4212 else
4213 return "";
4214}
4215
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4220
/* PCI probe entry point: enables the device, maps BARs, allocates the
 * netdev/adapter, configures DMA masks (64-bit preferred, 32-bit
 * fallback), brings the firmware to a ready state (FLR'ing the PF if no
 * VFs are enabled), initializes stats and driver state, runs be_setup()
 * and registers the netdev. Errors unwind in reverse order through the
 * goto-cleanup ladder at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: log and continue if unsupported */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4349
/* PM suspend hook: arm WoL if enabled, stop the recovery worker, close
 * the interface under rtnl, tear down driver state and power down the
 * PCI device. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Ensure no recovery work runs while the device is down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4373
/* PM resume hook: re-enable and restore the PCI device, re-init firmware
 * command support, rebuild driver state with be_setup(), reopen the
 * interface if it was running, restart the recovery worker and disarm
 * WoL. Returns 0 on success or a negative errno from the early steps.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4410
/*
 * An FLR will stop BE from DMAing any data.
 */
/* Shutdown hook: cancel both work items, detach the netdev, reset the
 * function via firmware (FLR) and disable the PCI device so no DMA
 * continues across kexec/reboot.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4430
/* EEH error-detected callback: on the first error, flag eeh_error, stop
 * the recovery worker, close/detach the netdev under rtnl and clear
 * driver state. Returns DISCONNECT for permanent failures, otherwise
 * disables the device and requests a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4469
/* EEH slot-reset callback: re-enable and restore the PCI device, wait
 * for the firmware to become ready, clear AER status and the driver's
 * error flags. Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4496
/* EEH resume callback: after a successful slot reset, FLR the function,
 * re-init firmware commands, rebuild driver state, reopen the netdev if
 * it was running and restart the recovery worker. On any failure just
 * logs an error — the device stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4533
/* PCIe EEH (AER) error-recovery callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4539
/* PCI driver descriptor tying the device ID table to the probe/remove,
 * power-management, shutdown and EEH entry points above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4550
4551static int __init be_init_module(void)
4552{
Joe Perches8e95a202009-12-03 07:58:21 +00004553 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4554 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004555 printk(KERN_WARNING DRV_NAME
4556 " : Module param rx_frag_size must be 2048/4096/8192."
4557 " Using 2048\n");
4558 rx_frag_size = 2048;
4559 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004560
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004561 return pci_register_driver(&be_driver);
4562}
4563module_init(be_init_module);
4564
/* Module unload entry point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);