blob: 961e9f0500c57635f08670d0a88181bc3e808b94 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070025
26MODULE_VERSION(DRV_VER);
27MODULE_DEVICE_TABLE(pci, be_dev_ids);
28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000029MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030MODULE_LICENSE("GPL");
31
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla11ac75e2011-12-13 00:58:50 +000036static ushort rx_frag_size = 2048;
37module_param(rx_frag_size, ushort, S_IRUGO);
38MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
/* PCI device IDs this driver binds to; table is zero-terminated */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: name of the HW block for each bit position
 * (entry i describes bit i of the low Unrecoverable-Error status register)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: name of the HW block for each bit position
 * (entry i describes bit i of the high Unrecoverable-Error status register)
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700122
Sathya Perla752961a2011-10-24 02:45:03 +0000123/* Is BE in a multi-channel mode */
124static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128}
129
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000136 mem->va = NULL;
137 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138}
139
140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142{
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000152 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153 return 0;
154}
155
Somnath Kotur68c45a22013-03-14 02:42:07 +0000156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Sathya Perladb3ea782011-08-22 19:41:52 +0000160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173}
174
Somnath Kotur68c45a22013-03-14 02:42:07 +0000175static void be_intr_set(struct be_adapter *adapter, bool enable)
176{
177 int status = 0;
178
179 /* On lancer interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189}
190
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192{
193 u32 val = 0;
194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000207
208 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210}
211
Sathya Perla8788fdc2009-07-27 22:52:03 +0000212static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 bool arm, bool clear_int, u16 num_popped)
214{
215 u32 val = 0;
216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
218 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000219
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000220 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000221 return;
222
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700223 if (arm)
224 val |= 1 << DB_EQ_REARM_SHIFT;
225 if (clear_int)
226 val |= 1 << DB_EQ_CLR_SHIFT;
227 val |= 1 << DB_EQ_EVNT_SHIFT;
228 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233{
234 u32 val = 0;
235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
/* ndo_set_mac_address handler: program a new MAC via FW, delete the old
 * pmac entry, and confirm with the FW that the new MAC is active before
 * committing it to netdev->dev_addr.
 * Returns 0 on success; -EADDRNOTAVAIL for an invalid address, -EPERM when
 * the FW reports a different active MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
301
Sathya Perlaca34fe32012-11-06 17:48:56 +0000302/* BE2 supports only v0 cmd */
303static void *hw_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307
308 return &cmd->hw_stats;
309 } else {
310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311
312 return &cmd->hw_stats;
313 }
314}
315
316/* BE2 supports only v0 cmd */
317static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
318{
319 if (BE2_chip(adapter)) {
320 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
321
322 return &hw_stats->erx;
323 } else {
324 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
325
326 return &hw_stats->erx;
327 }
328}
329
/* Copy the v0-layout (BE2) HW stats from the FW response into the driver's
 * consolidated stat block (adapter->drv_stats), converting endianness first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW DMAs stats in LE; convert the whole struct in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 layout reports address and vlan filtering separately;
	 * fold both into the single driver counter
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per-port at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
378
/* Copy the v1-layout (BE3/Skyhawk) HW stats from the FW response into the
 * driver's consolidated stat block, converting endianness first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW DMAs stats in LE; convert the whole struct in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* v1 keeps jabber events per-port directly in port_stats */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
424
/* Copy Lancer per-port (pport) stats from the FW response into the driver's
 * consolidated stat block. Lancer reports 64-bit counters split into
 * lo/hi words; only the low words are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW DMAs stats in LE; convert the whole struct in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address- and vlan-filtered drops into one driver counter */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000463
Sathya Perla09c1c682011-08-22 19:41:53 +0000464static void accumulate_16bit_val(u32 *acc, u16 val)
465{
466#define lo(x) (x & 0xFFFF)
467#define hi(x) (x & 0xFFFF0000)
468 bool wrapped = val < lo(*acc);
469 u32 newacc = hi(*acc) + val;
470
471 if (wrapped)
472 newacc += 65536;
473 ACCESS_ONCE(*acc) = newacc;
474}
475
Jingoo Han4188e7d2013-08-05 18:02:02 +0900476static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000477 struct be_rx_obj *rxo,
478 u32 erx_stat)
479{
480 if (!BEx_chip(adapter))
481 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
482 else
483 /* below erx HW counter can actually wrap around after
484 * 65535. Driver accumulates a 32-bit value
485 */
486 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
487 (u16)erx_stat);
488}
489
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000490void be_parse_stats(struct be_adapter *adapter)
491{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000492 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
493 struct be_rx_obj *rxo;
494 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000495 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000496
Sathya Perlaca34fe32012-11-06 17:48:56 +0000497 if (lancer_chip(adapter)) {
498 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000499 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000500 if (BE2_chip(adapter))
501 populate_be_v0_stats(adapter);
502 else
503 /* for BE3 and Skyhawk */
504 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000505
Sathya Perlaca34fe32012-11-06 17:48:56 +0000506 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
507 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000508 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
509 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000510 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000511 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000512}
513
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via the u64_stats seqcount retry loop) and fill in
 * error totals from the consolidated driver stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
579
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000580void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700582 struct net_device *netdev = adapter->netdev;
583
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000584 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000585 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000586 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000588
589 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
590 netif_carrier_on(netdev);
591 else
592 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700593}
594
Sathya Perla3c8def92011-06-12 20:01:58 +0000595static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000596 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700597{
Sathya Perla3c8def92011-06-12 20:01:58 +0000598 struct be_tx_stats *stats = tx_stats(txo);
599
Sathya Perlaab1594e2011-07-25 19:10:15 +0000600 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000601 stats->tx_reqs++;
602 stats->tx_wrbs += wrb_cnt;
603 stats->tx_bytes += copied;
604 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700605 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000606 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000607 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700608}
609
610/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000611static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
612 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700614 int cnt = (skb->len > skb->data_len);
615
616 cnt += skb_shinfo(skb)->nr_frags;
617
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700618 /* to account for hdr wrb */
619 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000620 if (lancer_chip(adapter) || !(cnt & 1)) {
621 *dummy = false;
622 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700623 /* add a dummy to make it an even num */
624 cnt++;
625 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000626 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700627 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
628 return cnt;
629}
630
631static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
632{
633 wrb->frag_pa_hi = upper_32_bits(addr);
634 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
635 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000636 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700637}
638
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000639static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
640 struct sk_buff *skb)
641{
642 u8 vlan_prio;
643 u16 vlan_tag;
644
645 vlan_tag = vlan_tx_tag_get(skb);
646 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
647 /* If vlan priority provided by OS is NOT in available bmap */
648 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
649 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
650 adapter->recommended_prio;
651
652 return vlan_tag;
653}
654
/* Populate the TX header WRB for one packet: CRC, LSO/checksum-offload,
 * VLAN, event/completion bits, total WRB count and payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* Large-send offload: pass the MSS to HW */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not set for Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Request L4 checksum offload for TCP/UDP pkts */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
689
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000690static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000691 bool unmap_single)
692{
693 dma_addr_t dma;
694
695 be_dws_le_to_cpu(wrb, sizeof(*wrb));
696
697 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000698 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000699 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000700 dma_unmap_single(dev, dma, wrb->frag_len,
701 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000702 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000703 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000704 }
705}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706
/* DMA-map the skb's linear data and page frags and post one data WRB per
 * fragment (plus an optional dummy WRB) to the TX queue; the header WRB
 * is filled last, once the total mapped length is known.
 * Returns the number of payload bytes mapped, or 0 on DMA-mapping
 * failure, in which case all mappings are undone and the queue head is
 * restored (caller frees the skb).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot now; it is filled at the end */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		/* Linear part of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					 skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length WRB to make the total count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything posted so far; only the first WRB can be a
	 * dma_map_single() mapping, hence map_single is cleared after
	 * the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
773
/* Insert the VLAN tag (and the outer qnq VLAN, if configured) into the
 * packet data itself instead of relying on HW tagging.
 * Returns the (possibly reallocated) skb, or NULL if a copy failed;
 * sets *skip_hw_vlan when the FW must be told to skip HW tagging.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* Packet data is modified below; make sure we own the skb */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* Untagged pkts get the port VLAN id */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now inside the pkt; clear the meta tag so HW
		 * does not insert it a second time
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
816
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000817static bool be_ipv6_exthdr_check(struct sk_buff *skb)
818{
819 struct ethhdr *eh = (struct ethhdr *)skb->data;
820 u16 offset = ETH_HLEN;
821
822 if (eh->h_proto == htons(ETH_P_IPV6)) {
823 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
824
825 offset += sizeof(struct ipv6hdr);
826 if (ip6h->nexthdr != NEXTHDR_TCP &&
827 ip6h->nexthdr != NEXTHDR_UDP) {
828 struct ipv6_opt_hdr *ehdr =
829 (struct ipv6_opt_hdr *) (skb->data + offset);
830
831 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
832 if (ehdr->hdrlen == 0xff)
833 return true;
834 }
835 }
836 return false;
837}
838
839static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
840{
841 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
842}
843
Sathya Perlaee9c7992013-05-22 23:04:55 +0000844static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
845 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000846{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000847 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000848}
849
/* Apply the chip-specific TX workarounds to an outgoing skb before WRBs
 * are built. May reallocate or pad the skb; returns the skb to transmit,
 * or NULL if the pkt had to be dropped (already freed here).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the pkt back to the length the IP header claims.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
926
/* ndo_start_xmit handler: apply TX workarounds, build the WRBs, stop the
 * subqueue if it is about to fill, and ring the TX doorbell.
 * Always returns NETDEV_TX_OK; on any failure the skb is freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* header-WRB index; keys sent_skb_list */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		/* read gso_segs before the skb can be recycled by tx-compl */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
972
973static int be_change_mtu(struct net_device *netdev, int new_mtu)
974{
975 struct be_adapter *adapter = netdev_priv(netdev);
976 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000977 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
978 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700979 dev_info(&adapter->pdev->dev,
980 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000981 BE_MIN_MTU,
982 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700983 return -EINVAL;
984 }
985 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
986 netdev->mtu, new_mtu);
987 netdev->mtu = new_mtu;
988 return 0;
989}
990
991/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000992 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
993 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700994 */
Sathya Perla10329df2012-06-05 19:37:18 +0000995static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700996{
Sathya Perla10329df2012-06-05 19:37:18 +0000997 u16 vids[BE_NUM_VLANS_SUPPORTED];
998 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000999 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001000
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001001 /* No need to further configure vids if in promiscuous mode */
1002 if (adapter->promiscuous)
1003 return 0;
1004
Sathya Perla92bf14a2013-08-27 16:57:32 +05301005 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001006 goto set_vlan_promisc;
1007
1008 /* Construct VLAN Table to give to HW */
1009 for (i = 0; i < VLAN_N_VID; i++)
1010 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +00001011 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001012
1013 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +00001014 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001015
1016 /* Set to VLAN promisc mode as setting VLAN filter failed */
1017 if (status) {
1018 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1019 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
1020 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001021 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001022
Sathya Perlab31c50a2009-09-17 10:30:13 -07001023 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001024
1025set_vlan_promisc:
1026 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1027 NULL, 0, 1, 1);
1028 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001029}
1030
Patrick McHardy80d5c362013-04-19 02:04:28 +00001031static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001032{
1033 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001034 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001035
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001036 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001037 status = -EINVAL;
1038 goto ret;
1039 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001040
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001041 /* Packets with VID 0 are always received by Lancer by default */
1042 if (lancer_chip(adapter) && vid == 0)
1043 goto ret;
1044
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001045 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301046 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001047 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001048
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001049 if (!status)
1050 adapter->vlans_added++;
1051 else
1052 adapter->vlan_tag[vid] = 0;
1053ret:
1054 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001055}
1056
Patrick McHardy80d5c362013-04-19 02:04:28 +00001057static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058{
1059 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001060 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001061
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001062 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001063 status = -EINVAL;
1064 goto ret;
1065 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001066
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001067 /* Packets with VID 0 are always received by Lancer by default */
1068 if (lancer_chip(adapter) && vid == 0)
1069 goto ret;
1070
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001071 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301072 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001073 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001074
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001075 if (!status)
1076 adapter->vlans_added--;
1077 else
1078 adapter->vlan_tag[vid] = 1;
1079ret:
1080 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001081}
1082
/* ndo_set_rx_mode handler: program promiscuous / allmulti / unicast /
 * multicast RX filtering to match the netdev's current address lists
 * and flags. Falls back to a more permissive mode whenever HW filter
 * capacity is exceeded or a filter command fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters dropped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush the stale uc-MAC entries before re-adding */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1144
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001145static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1146{
1147 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001148 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001149 int status;
1150
Sathya Perla11ac75e2011-12-13 00:58:50 +00001151 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001152 return -EPERM;
1153
Sathya Perla11ac75e2011-12-13 00:58:50 +00001154 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001155 return -EINVAL;
1156
Sathya Perla3175d8c2013-07-23 15:25:03 +05301157 if (BEx_chip(adapter)) {
1158 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1159 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001160
Sathya Perla11ac75e2011-12-13 00:58:50 +00001161 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1162 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301163 } else {
1164 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1165 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001166 }
1167
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001168 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001169 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1170 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001171 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001172 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001173
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001174 return status;
1175}
1176
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001177static int be_get_vf_config(struct net_device *netdev, int vf,
1178 struct ifla_vf_info *vi)
1179{
1180 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001181 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001182
Sathya Perla11ac75e2011-12-13 00:58:50 +00001183 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001184 return -EPERM;
1185
Sathya Perla11ac75e2011-12-13 00:58:50 +00001186 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001187 return -EINVAL;
1188
1189 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001190 vi->tx_rate = vf_cfg->tx_rate;
1191 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001192 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001193 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001194
1195 return 0;
1196}
1197
/* ndo_set_vf_vlan handler: enable (vlan != 0) or reset (vlan == 0)
 * transparent VLAN tagging for a VF via the host-switch config.
 * NOTE(review): the qos argument is accepted but never used here —
 * confirm whether non-zero qos values should be rejected instead of
 * silently ignored.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			/* NOTE(review): the cached vlan_tag is updated before
			 * the FW cmd; it stays stale if the cmd fails.
			 */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					adapter->vf_cfg[vf].if_handle, 0);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1232
Ajit Khapardee1d18732010-07-23 01:52:13 +00001233static int be_set_vf_tx_rate(struct net_device *netdev,
1234 int vf, int rate)
1235{
1236 struct be_adapter *adapter = netdev_priv(netdev);
1237 int status = 0;
1238
Sathya Perla11ac75e2011-12-13 00:58:50 +00001239 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001240 return -EPERM;
1241
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001242 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001243 return -EINVAL;
1244
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001245 if (rate < 100 || rate > 10000) {
1246 dev_err(&adapter->pdev->dev,
1247 "tx rate must be between 100 and 10000 Mbps\n");
1248 return -EINVAL;
1249 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001250
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001251 if (lancer_chip(adapter))
1252 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1253 else
1254 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001255
1256 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001257 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001258 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001259 else
1260 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001261 return status;
1262}
1263
/* Adaptive interrupt coalescing: derive a new EQ delay from the RX
 * packet rate measured over the last interval and program it into the
 * EQ if it changed. Runs periodically per event-queue object.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* With AIC disabled, just program the static configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot rx_pkts consistently under the u64 stats seqcount */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkt rate to an EQ delay, clamped to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW cmd only when the delay actually changes */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1312
Sathya Perla3abcded2010-10-03 22:12:27 -07001313static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001314 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001315{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001316 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001317
Sathya Perlaab1594e2011-07-25 19:10:15 +00001318 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001319 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001320 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001321 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001322 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001323 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001324 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001325 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001326 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001327}
1328
Sathya Perla2e588f82011-03-11 02:49:26 +00001329static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001330{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001331 /* L4 checksum is not reliable for non TCP/UDP packets.
1332 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001333 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1334 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001335}
1336
/* Return the page_info entry for the rxq fragment at frag_idx and account
 * for the consumed rxq entry. If this fragment is the last user of its
 * (big) backing page, the page is DMA-unmapped from the device first.
 * The caller owns the page reference held in the returned entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
				u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* One big page backs several frags (see be_post_rx_frags());
	 * unmap only when the last frag of the page is consumed.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1357
1358/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001359static void be_rx_compl_discard(struct be_rx_obj *rxo,
1360 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001361{
Sathya Perla3abcded2010-10-03 22:12:27 -07001362 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001364 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001366 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001367 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001368 put_page(page_info->page);
1369 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001370 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 }
1372}
1373
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN (or whole tiny pkt) bytes go
 * into the skb linear area, remaining data is attached as page frags.
 * Frags that share one physical page are coalesced into a single slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area; the
		 * rest of the first frag stays in the page as frags[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * reference taken when the frag was posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1450
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * build an skb, set csum/protocol/queue/hash/vlan metadata and hand it
 * to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: drop the frame and recycle its posted rx pages */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1484
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach all fragments to a napi-provided skb (coalescing frags that
 * share a physical page) and feed it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb: drop the frame and recycle its posted rx pages */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken when the frag was posted.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when HW verified the csum (see caller) */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1541
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001542static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1543 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544{
Sathya Perla2e588f82011-03-11 02:49:26 +00001545 rxcp->pkt_size =
1546 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1547 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1548 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1549 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001550 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001551 rxcp->ip_csum =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1553 rxcp->l4_csum =
1554 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1555 rxcp->ipv6 =
1556 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1557 rxcp->rxq_idx =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1559 rxcp->num_rcvd =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1561 rxcp->pkt_type =
1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001563 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001564 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001565 if (rxcp->vlanf) {
1566 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001567 compl);
1568 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1569 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001570 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001571 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001572}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001573
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001574static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1575 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001576{
1577 rxcp->pkt_size =
1578 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1579 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1580 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1581 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001582 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001583 rxcp->ip_csum =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1585 rxcp->l4_csum =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1587 rxcp->ipv6 =
1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1589 rxcp->rxq_idx =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1591 rxcp->num_rcvd =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1593 rxcp->pkt_type =
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001595 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001596 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001597 if (rxcp->vlanf) {
1598 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001599 compl);
1600 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1601 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001602 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001603 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001604 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1605 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001606}
1607
/* Fetch the next valid Rx completion from rxo's CQ, or NULL if none.
 * Parses the HW entry (v0 or v1 layout) into the per-rxo rxcp struct,
 * applies csum/vlan fixups and consumes the CQ entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the body of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 csum is not valid for IP fragments; ignore it */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* HW reports the tag big-endian on BE chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* NOTE(review): presumably suppresses the pvid tag unless the
		 * vlan is also on the kernel's vlan list -- confirm */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1650
Eric Dumazet1829b082011-03-01 05:48:12 +00001651static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001652{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001653 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001654
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001655 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001656 gfp |= __GFP_COMP;
1657 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001658}
1659
1660/*
1661 * Allocate a page, split it to fragments of size rx_frag_size and post as
1662 * receive buffers to BE
1663 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001664static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665{
Sathya Perla3abcded2010-10-03 22:12:27 -07001666 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001667 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001668 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001669 struct page *pagep = NULL;
1670 struct be_eth_rx_d *rxd;
1671 u64 page_dmaaddr = 0, frag_dmaaddr;
1672 u32 posted, page_offset = 0;
1673
Sathya Perla3abcded2010-10-03 22:12:27 -07001674 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1676 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001677 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001679 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680 break;
1681 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001682 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1683 0, adapter->big_page_size,
1684 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685 page_info->page_offset = 0;
1686 } else {
1687 get_page(pagep);
1688 page_info->page_offset = page_offset + rx_frag_size;
1689 }
1690 page_offset = page_info->page_offset;
1691 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001692 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1694
1695 rxd = queue_head_node(rxq);
1696 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1697 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698
1699 /* Any space left in the current big page for another frag? */
1700 if ((page_offset + rx_frag_size + rx_frag_size) >
1701 adapter->big_page_size) {
1702 pagep = NULL;
1703 page_info->last_page_user = true;
1704 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001705
1706 prev_page_info = page_info;
1707 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001708 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709 }
1710 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001711 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001712
1713 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001715 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001716 } else if (atomic_read(&rxq->used) == 0) {
1717 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001718 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720}
1721
/* Fetch the next valid Tx completion from tx_cq, or NULL if none.
 * Clears the valid bit and consumes the CQ entry.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the body of the compl only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so the entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1737
/* Reclaim the skb whose wrbs end at last_index in txq: unmap each data
 * wrb (the first also unmaps the skb's linear header, if any) and free
 * the skb. Returns the number of wrbs reclaimed including the header
 * wrb; the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb maps the skb header bytes */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1769
/* Return the number of events in the event queue; each counted entry's
 * evt word is zeroed so it will not be counted again.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Clear the entry only after the non-zero evt is seen */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1789
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001790/* Leaves the EQ is disarmed state */
1791static void be_eq_clean(struct be_eq_obj *eqo)
1792{
1793 int num = events_get(eqo);
1794
1795 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1796}
1797
/* Drain the Rx CQ until the HW flush completion arrives, then free all
 * posted-but-unused rx buffers. Leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1848
/* Reclaim Tx completions from all tx queues, waiting up to ~200ms for
 * them to arrive; afterwards free any posted skbs for which completions
 * will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the compls and release the wrb slots */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb to find its
			 * last index, then reclaim it like a completion.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1907
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001908static void be_evt_queues_destroy(struct be_adapter *adapter)
1909{
1910 struct be_eq_obj *eqo;
1911 int i;
1912
1913 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001914 if (eqo->q.created) {
1915 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001916 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05301917 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001918 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001919 be_queue_free(adapter, &eqo->q);
1920 }
1921}
1922
1923static int be_evt_queues_create(struct be_adapter *adapter)
1924{
1925 struct be_queue_info *eq;
1926 struct be_eq_obj *eqo;
1927 int i, rc;
1928
Sathya Perla92bf14a2013-08-27 16:57:32 +05301929 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1930 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001931
1932 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05301933 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1934 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001935 eqo->adapter = adapter;
1936 eqo->tx_budget = BE_TX_BUDGET;
1937 eqo->idx = i;
1938 eqo->max_eqd = BE_MAX_EQD;
1939 eqo->enable_aic = true;
1940
1941 eq = &eqo->q;
1942 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1943 sizeof(struct be_eq_entry));
1944 if (rc)
1945 return rc;
1946
Sathya Perlaf2f781a2013-08-27 16:57:30 +05301947 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001948 if (rc)
1949 return rc;
1950 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001951 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001952}
1953
Sathya Perla5fb379e2009-06-18 00:02:59 +00001954static void be_mcc_queues_destroy(struct be_adapter *adapter)
1955{
1956 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001957
Sathya Perla8788fdc2009-07-27 22:52:03 +00001958 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001959 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001960 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001961 be_queue_free(adapter, q);
1962
Sathya Perla8788fdc2009-07-27 22:52:03 +00001963 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001964 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001965 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001966 be_queue_free(adapter, q);
1967}
1968
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates and creates the MCC completion queue and WRB queue;
 * returns 0 on success, -1 on any failure (callers treat non-zero
 * as error).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Host memory for the MCC completion queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	/* Host memory for the MCC WRB queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: release resources in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2001
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002002static void be_tx_queues_destroy(struct be_adapter *adapter)
2003{
2004 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002005 struct be_tx_obj *txo;
2006 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002007
Sathya Perla3c8def92011-06-12 20:01:58 +00002008 for_all_tx_queues(adapter, txo, i) {
2009 q = &txo->q;
2010 if (q->created)
2011 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2012 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013
Sathya Perla3c8def92011-06-12 20:01:58 +00002014 q = &txo->cq;
2015 if (q->created)
2016 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2017 be_queue_free(adapter, q);
2018 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002019}
2020
Sathya Perla77071332013-08-27 16:57:34 +05302021static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002023 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002024 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302025 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002026
Sathya Perla92bf14a2013-08-27 16:57:32 +05302027 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002028
Sathya Perla3c8def92011-06-12 20:01:58 +00002029 for_all_tx_queues(adapter, txo, i) {
2030 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002031 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2032 sizeof(struct be_eth_tx_compl));
2033 if (status)
2034 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002036 /* If num_evt_qs is less than num_tx_qs, then more than
2037 * one txq share an eq
2038 */
2039 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2040 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2041 if (status)
2042 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002043
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002044 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2045 sizeof(struct be_eth_wrb));
2046 if (status)
2047 return status;
2048
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002049 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002050 if (status)
2051 return status;
2052 }
2053
Sathya Perlad3791422012-09-28 04:39:44 +00002054 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2055 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002056 return 0;
2057}
2058
2059static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002060{
2061 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002062 struct be_rx_obj *rxo;
2063 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002064
Sathya Perla3abcded2010-10-03 22:12:27 -07002065 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002066 q = &rxo->cq;
2067 if (q->created)
2068 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2069 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002070 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071}
2072
/* Size the RX queue set and create a completion queue for each ring.
 * num_rx_qs counts the RSS rings plus (when RSS is used) one extra
 * default RXQ for non-IP traffic.  Partially created CQs are cleaned
 * up by the caller via be_rx_cqs_destroy().
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2108
/* INTx (shared line) interrupt handler.  Only the first EQ services the
 * INTx line (see be_irq_register()).  Schedules NAPI and acks whatever
 * events were counted; contains special handling for spurious
 * interrupts seen on Lancer and BE2.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* A real event arrived: reset the spurious-intr streak */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2140
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002141static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002142{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002143 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002144
Sathya Perla0b545a62012-11-23 00:27:18 +00002145 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2146 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002147 return IRQ_HANDLED;
2148}
2149
Sathya Perla2e588f82011-03-11 02:49:26 +00002150static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002151{
Somnath Koture38b1702013-05-29 22:55:56 +00002152 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002153}
2154
/* Reap up to @budget RX completions from @rxo's CQ, dispatching each
 * frame via GRO or the regular RX path.  Notifies the CQ for the
 * reaped entries and replenishes RX buffers when the queue runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring once it drops below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2204
/* Reap up to @budget TX completions from @txo's CQ, handing each
 * completed entry to be_tx_compl_process() and releasing the consumed
 * wrbs.  Re-wakes netdev sub-queue @idx if it was stopped for lack of
 * wrbs and the ring has drained below half full.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are updated under a seqcount on 32-bit arches */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002237
/* NAPI poll handler for one EQ: services all TXQs and RXQs mapped to
 * this EQ and, for the MCC EQ, the MCC completions.  The EQ is rearmed
 * only when all work fit within the budget; otherwise events are
 * counted and cleared without rearming so polling continues.
 * Returns the amount of work done (budget when polling must continue).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* An undrained TX CQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2276
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002277void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002278{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002279 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2280 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002281 u32 i;
2282
Sathya Perlad23e9462012-12-17 19:38:51 +00002283 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002284 return;
2285
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002286 if (lancer_chip(adapter)) {
2287 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2288 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2289 sliport_err1 = ioread32(adapter->db +
2290 SLIPORT_ERROR1_OFFSET);
2291 sliport_err2 = ioread32(adapter->db +
2292 SLIPORT_ERROR2_OFFSET);
2293 }
2294 } else {
2295 pci_read_config_dword(adapter->pdev,
2296 PCICFG_UE_STATUS_LOW, &ue_lo);
2297 pci_read_config_dword(adapter->pdev,
2298 PCICFG_UE_STATUS_HIGH, &ue_hi);
2299 pci_read_config_dword(adapter->pdev,
2300 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2301 pci_read_config_dword(adapter->pdev,
2302 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002303
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002304 ue_lo = (ue_lo & ~ue_lo_mask);
2305 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002306 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002307
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002308 /* On certain platforms BE hardware can indicate spurious UEs.
2309 * Allow the h/w to stop working completely in case of a real UE.
2310 * Hence not setting the hw_error for UE detection.
2311 */
2312 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002313 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002314 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002315 "Error detected in the card\n");
2316 }
2317
2318 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2319 dev_err(&adapter->pdev->dev,
2320 "ERR: sliport status 0x%x\n", sliport_status);
2321 dev_err(&adapter->pdev->dev,
2322 "ERR: sliport error1 0x%x\n", sliport_err1);
2323 dev_err(&adapter->pdev->dev,
2324 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002325 }
2326
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002327 if (ue_lo) {
2328 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2329 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002330 dev_err(&adapter->pdev->dev,
2331 "UE: %s bit set\n", ue_status_low_desc[i]);
2332 }
2333 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002334
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002335 if (ue_hi) {
2336 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2337 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002338 dev_err(&adapter->pdev->dev,
2339 "UE: %s bit set\n", ue_status_hi_desc[i]);
2340 }
2341 }
2342
2343}
2344
Sathya Perla8d56ff12009-11-22 22:02:26 +00002345static void be_msix_disable(struct be_adapter *adapter)
2346{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002347 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002348 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002349 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302350 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002351 }
2352}
2353
/* Enable MSI-x with as many vectors as HW/config allow and split them
 * between the NIC and RoCE (when supported).  Returns 0 on success or
 * when falling back to INTx on a PF; returns the pci_enable_msix()
 * error for a VF, where INTx is not supported.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* Legacy pci_enable_msix(): 0 = success, negative = error,
	 * positive = number of vectors actually available -- in which
	 * case retry with that smaller count (if still enough).
	 */
	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Reserve half of the vectors for RoCE when it is supported */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2402
/* Return the Linux IRQ vector number backing the given EQ's MSI-x entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2408
2409static int be_msix_register(struct be_adapter *adapter)
2410{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002411 struct net_device *netdev = adapter->netdev;
2412 struct be_eq_obj *eqo;
2413 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002414
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002415 for_all_evt_queues(adapter, eqo, i) {
2416 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2417 vec = be_msix_vec_get(adapter, eqo);
2418 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002419 if (status)
2420 goto err_msix;
2421 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002422
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002423 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002424err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002425 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2426 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2427 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2428 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002429 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002430 return status;
2431}
2432
2433static int be_irq_register(struct be_adapter *adapter)
2434{
2435 struct net_device *netdev = adapter->netdev;
2436 int status;
2437
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002438 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002439 status = be_msix_register(adapter);
2440 if (status == 0)
2441 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002442 /* INTx is not supported for VF */
2443 if (!be_physfn(adapter))
2444 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002445 }
2446
Sathya Perlae49cc342012-11-27 19:50:02 +00002447 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002448 netdev->irq = adapter->pdev->irq;
2449 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002450 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002451 if (status) {
2452 dev_err(&adapter->pdev->dev,
2453 "INTx request IRQ failed - err %d\n", status);
2454 return status;
2455 }
2456done:
2457 adapter->isr_registered = true;
2458 return 0;
2459}
2460
2461static void be_irq_unregister(struct be_adapter *adapter)
2462{
2463 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002464 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002465 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002466
2467 if (!adapter->isr_registered)
2468 return;
2469
2470 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002471 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002472 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002473 goto done;
2474 }
2475
2476 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002477 for_all_evt_queues(adapter, eqo, i)
2478 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002479
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002480done:
2481 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002482}
2483
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002484static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002485{
2486 struct be_queue_info *q;
2487 struct be_rx_obj *rxo;
2488 int i;
2489
2490 for_all_rx_queues(adapter, rxo, i) {
2491 q = &rxo->q;
2492 if (q->created) {
2493 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002494 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002495 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002496 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002497 }
2498}
2499
/* netdev close entry point: quiesce NAPI, MCC, TX and RX paths in an
 * order that reclaims all in-flight skbs before the IRQ lines are
 * released.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Notify the RoCE side (which shares this function) first */
	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Ensure no in-flight interrupt still references each EQ before
	 * draining it; only then release the IRQ lines.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2536
/* Allocate the RX rings, create them in FW (default RXQ first, as the
 * FW requires), program the 128-entry RSS indirection table when more
 * than one RX queue exists, and post the initial receive buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Stripe the RSS ring ids round-robin across all 128
		 * indirection-table slots.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not available on BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2593
/* ndo_open handler: bring the interface up.
 * Creates the RX queues, registers IRQs, arms all completion queues,
 * enables NAPI and event-queue notifications, then starts the TX queues.
 * On any failure the partially-opened state is torn down via be_close()
 * and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm RX and TX completion queues so completions start flowing */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* NAPI is enabled before each event queue is armed so that no
	 * event is delivered with NAPI still disabled
	 */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: report current link state if the query succeeds */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2636
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002637static int be_setup_wol(struct be_adapter *adapter, bool enable)
2638{
2639 struct be_dma_mem cmd;
2640 int status = 0;
2641 u8 mac[ETH_ALEN];
2642
2643 memset(mac, 0, ETH_ALEN);
2644
2645 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002646 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2647 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002648 if (cmd.va == NULL)
2649 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002650
2651 if (enable) {
2652 status = pci_write_config_dword(adapter->pdev,
2653 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2654 if (status) {
2655 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002656 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002657 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2658 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002659 return status;
2660 }
2661 status = be_cmd_enable_magic_wol(adapter,
2662 adapter->netdev->dev_addr, &cmd);
2663 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2664 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2665 } else {
2666 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2667 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2668 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2669 }
2670
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002671 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002672 return status;
2673}
2674
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002675/*
2676 * Generate a seed MAC address from the PF MAC Address using jhash.
2677 * MAC Address for VFs are assigned incrementally starting from the seed.
2678 * These addresses are programmed in the ASIC by the PF and the VF driver
2679 * queries for the MAC address during its probe.
2680 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	/* Generate the seed MAC; each subsequent VF gets last byte + 1 */
	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs per-VF MACs via pmac entries; newer chips
		 * use the SET_MAC command
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* NOTE(review): failures are only logged; the returned
		 * status reflects the last VF processed, so an earlier
		 * failure followed by a success is not propagated.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
2709
/* Read back each VF's currently programmed MAC (used when VFs were already
 * enabled by a previous driver load, so new addresses must not be generated).
 * Also refreshes each vf_cfg->pmac_id from the FW MAC list.
 * Returns 0 on success or the first query error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Fills vf_cfg->pmac_id; its return value is not checked —
		 * the MAC itself is (re)queried authoritatively below
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2729
/* Tear down SR-IOV state: disable SR-IOV, remove per-VF MACs and destroy
 * per-VF interfaces. If VFs are currently assigned to guest VMs, SR-IOV is
 * left enabled and only the PF-side bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx clears the MAC via pmac delete; newer chips pass a
		 * NULL MAC to the SET_MAC command
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2757
/* Destroy all queues in the reverse order of their creation in
 * be_setup_queues(): MCC, RX CQs, TX queues, then event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2765
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302766static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002767{
Sathya Perla191eb752012-02-23 18:50:13 +00002768 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2769 cancel_delayed_work_sync(&adapter->work);
2770 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2771 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302772}
2773
/* Undo everything done by be_setup(): stop the worker, clear VFs, delete
 * all programmed MACs, destroy the interface and queues, and release MSI-X.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2799
/* Create one FW interface per VF. On non-BE3 chips the per-VF capability
 * flags are first refreshed from the FW profile; otherwise the default
 * UNTAGGED|BROADCAST|MULTICAST set is used.
 * Returns 0 on success or the first interface-create error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
2829
Sathya Perla39f1d942012-05-08 19:41:24 +00002830static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002831{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002832 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002833 int vf;
2834
Sathya Perla39f1d942012-05-08 19:41:24 +00002835 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2836 GFP_KERNEL);
2837 if (!adapter->vf_cfg)
2838 return -ENOMEM;
2839
Sathya Perla11ac75e2011-12-13 00:58:50 +00002840 for_all_vfs(adapter, vf_cfg, vf) {
2841 vf_cfg->if_handle = -1;
2842 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002843 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002844 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002845}
2846
/* Provision SR-IOV VFs: discover or create per-VF interfaces, assign or
 * re-query MACs, grant filter-management privileges, read per-VF link and
 * VLAN config, and finally enable SR-IOV in the PCI layer.
 * If VFs are already enabled (old_vfs != 0, e.g. a previous driver load),
 * the existing provisioning is re-discovered instead of recreated.
 * On failure all VF state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		/* Clamp the module-parameter request to the HW maximum */
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Re-use existing VF interfaces or create new ones */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise, re-query existing MACs or assign fresh ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Best-effort: record the link speed as the VF's tx_rate */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV last, after all per-VF FW config is in place */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2944
/* On BE2/BE3 FW does not suggest the supported limits, so derive them here
 * from chip type, function mode and SR-IOV intent. @res is expected to be
 * zero-initialized by the caller (see be_get_resources()), so any field not
 * set below (e.g. max_rss_qs when RSS is unavailable) remains 0.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* In FLEX10 mode the VLAN space is shared, so fewer per function */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS queues (the default RXQ) */
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
2990
Sathya Perla30128032011-11-10 19:17:57 +00002991static void be_setup_init(struct be_adapter *adapter)
2992{
2993 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002994 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002995 adapter->if_handle = -1;
2996 adapter->be3_native = false;
2997 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002998 if (be_physfn(adapter))
2999 adapter->cmd_privileges = MAX_PRIVILEGES;
3000 else
3001 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003002}
3003
/* Populate adapter->res with per-function resource limits: computed locally
 * for BE2/BE3 (BEx_get_resources), queried from FW for Lancer/Skyhawk.
 * Returns 0 on success or a FW command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the PF can read the pool-wide VF limit */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3047
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	/* Basic function config: port number, mode, caps, ASIC revision */
	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3075
Sathya Perla95046b92013-07-23 15:25:02 +05303076static int be_mac_setup(struct be_adapter *adapter)
3077{
3078 u8 mac[ETH_ALEN];
3079 int status;
3080
3081 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3082 status = be_cmd_get_perm_mac(adapter, mac);
3083 if (status)
3084 return status;
3085
3086 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3087 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3088 } else {
3089 /* Maybe the HW was reset; dev_addr must be re-programmed */
3090 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3091 }
3092
3093 /* On BE3 VFs this cmd may fail due to lack of privilege.
3094 * Ignore the failure as in this case pmac_id is fetched
3095 * in the IFACE_CREATE cmd.
3096 */
3097 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3098 &adapter->pmac_id[0], 0);
3099 return 0;
3100}
3101
/* Schedule the adapter's delayed work to run in 1 second and record that
 * it is scheduled (the flag is consumed by be_cancel_worker()).
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3107
/* Create all queues in their required order (event queues first, then TX,
 * RX CQs and MCC) and publish the real queue counts to the net stack.
 * Caller must hold rtnl_lock for the netif_set_real_num_* calls (see the
 * rtnl_lock() around the call site in be_setup()).
 * Returns 0 on success or the first creation error.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3142
/* Recreate all queues (e.g. after a queue-count change): close the netdev
 * if it is running, stop the worker, tear down queues, re-enable MSI-X if
 * possible, rebuild the queues, and restore the previous running state.
 * Returns 0 on success or the first failure.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3178
/* Master initialization sequence: query FW config/limits, enable MSI-X,
 * create the FW interface and all queues, program MACs/VLANs/flow-control,
 * optionally provision SR-IOV VFs, and start the periodic worker.
 * On any failure everything done so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags the interface actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Only reprogram flow control if it differs from the FW setting */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failures are non-fatal for the PF itself */
	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3256
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with interrupts unavailable, re-arm each event queue and
 * drive its NAPI context by hand so RX/TX completions are still processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int qidx;

	for_all_evt_queues(adapter, eqo, qidx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3272
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Cookie ("*** SE FLASH DIRECTORY ***", split across two 16-byte halves)
 * that marks a flash_section_info header inside a UFI firmware image.
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003275
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003276static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003277 const u8 *p, u32 img_start, int image_size,
3278 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003279{
3280 u32 crc_offset;
3281 u8 flashed_crc[4];
3282 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003283
3284 crc_offset = hdr_size + img_start + image_size - 4;
3285
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003286 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003287
3288 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003289 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003290 if (status) {
3291 dev_err(&adapter->pdev->dev,
3292 "could not get crc from flash, not flashing redboot\n");
3293 return false;
3294 }
3295
3296 /*update redboot only if crc does not match*/
3297 if (!memcmp(flashed_crc, p, 4))
3298 return false;
3299 else
3300 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003301}
3302
Sathya Perla306f1342011-08-02 19:57:45 +00003303static bool phy_flashing_required(struct be_adapter *adapter)
3304{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003305 return (adapter->phy.phy_type == TN_8022 &&
3306 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003307}
3308
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003309static bool is_comp_in_ufi(struct be_adapter *adapter,
3310 struct flash_section_info *fsec, int type)
3311{
3312 int i = 0, img_type = 0;
3313 struct flash_section_info_g2 *fsec_g2 = NULL;
3314
Sathya Perlaca34fe32012-11-06 17:48:56 +00003315 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003316 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3317
3318 for (i = 0; i < MAX_FLASH_COMP; i++) {
3319 if (fsec_g2)
3320 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3321 else
3322 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3323
3324 if (img_type == type)
3325 return true;
3326 }
3327 return false;
3328
3329}
3330
Jingoo Han4188e7d2013-08-05 18:02:02 +09003331static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003332 int header_size,
3333 const struct firmware *fw)
3334{
3335 struct flash_section_info *fsec = NULL;
3336 const u8 *p = fw->data;
3337
3338 p += header_size;
3339 while (p < (fw->data + fw->size)) {
3340 fsec = (struct flash_section_info *)p;
3341 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3342 return fsec;
3343 p += 32;
3344 }
3345 return NULL;
3346}
3347
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003348static int be_flash(struct be_adapter *adapter, const u8 *img,
3349 struct be_dma_mem *flash_cmd, int optype, int img_size)
3350{
3351 u32 total_bytes = 0, flash_op, num_bytes = 0;
3352 int status = 0;
3353 struct be_cmd_write_flashrom *req = flash_cmd->va;
3354
3355 total_bytes = img_size;
3356 while (total_bytes) {
3357 num_bytes = min_t(u32, 32*1024, total_bytes);
3358
3359 total_bytes -= num_bytes;
3360
3361 if (!total_bytes) {
3362 if (optype == OPTYPE_PHY_FW)
3363 flash_op = FLASHROM_OPER_PHY_FLASH;
3364 else
3365 flash_op = FLASHROM_OPER_FLASH;
3366 } else {
3367 if (optype == OPTYPE_PHY_FW)
3368 flash_op = FLASHROM_OPER_PHY_SAVE;
3369 else
3370 flash_op = FLASHROM_OPER_SAVE;
3371 }
3372
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003373 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003374 img += num_bytes;
3375 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3376 flash_op, num_bytes);
3377 if (status) {
3378 if (status == ILLEGAL_IOCTL_REQ &&
3379 optype == OPTYPE_PHY_FW)
3380 break;
3381 dev_err(&adapter->pdev->dev,
3382 "cmd to write to flash rom failed.\n");
3383 return status;
3384 }
3385 }
3386 return 0;
3387}
3388
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003389/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003390static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003391 const struct firmware *fw,
3392 struct be_dma_mem *flash_cmd,
3393 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003394
Ajit Khaparde84517482009-09-04 03:12:16 +00003395{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003396 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003397 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003398 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003399 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003400 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003401 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003402
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003403 struct flash_comp gen3_flash_types[] = {
3404 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3405 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3406 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3407 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3408 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3409 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3410 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3411 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3412 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3413 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3414 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3415 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3416 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3417 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3418 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3419 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3420 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3421 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3422 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3423 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003424 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003425
3426 struct flash_comp gen2_flash_types[] = {
3427 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3428 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3429 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3430 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3431 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3432 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3433 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3434 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3435 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3436 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3437 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3438 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3439 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3440 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3441 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3442 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003443 };
3444
Sathya Perlaca34fe32012-11-06 17:48:56 +00003445 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003446 pflashcomp = gen3_flash_types;
3447 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003448 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003449 } else {
3450 pflashcomp = gen2_flash_types;
3451 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003452 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003453 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003454
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003455 /* Get flash section info*/
3456 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3457 if (!fsec) {
3458 dev_err(&adapter->pdev->dev,
3459 "Invalid Cookie. UFI corrupted ?\n");
3460 return -1;
3461 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003462 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003463 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003464 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003465
3466 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3467 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3468 continue;
3469
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003470 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3471 !phy_flashing_required(adapter))
3472 continue;
3473
3474 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3475 redboot = be_flash_redboot(adapter, fw->data,
3476 pflashcomp[i].offset, pflashcomp[i].size,
3477 filehdr_size + img_hdrs_size);
3478 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003479 continue;
3480 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003481
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003482 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003483 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003484 if (p + pflashcomp[i].size > fw->data + fw->size)
3485 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003486
3487 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3488 pflashcomp[i].size);
3489 if (status) {
3490 dev_err(&adapter->pdev->dev,
3491 "Flashing section type %d failed.\n",
3492 pflashcomp[i].img_type);
3493 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003494 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003495 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003496 return 0;
3497}
3498
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003499static int be_flash_skyhawk(struct be_adapter *adapter,
3500 const struct firmware *fw,
3501 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003502{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003503 int status = 0, i, filehdr_size = 0;
3504 int img_offset, img_size, img_optype, redboot;
3505 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3506 const u8 *p = fw->data;
3507 struct flash_section_info *fsec = NULL;
3508
3509 filehdr_size = sizeof(struct flash_file_hdr_g3);
3510 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3511 if (!fsec) {
3512 dev_err(&adapter->pdev->dev,
3513 "Invalid Cookie. UFI corrupted ?\n");
3514 return -1;
3515 }
3516
3517 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3518 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3519 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3520
3521 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3522 case IMAGE_FIRMWARE_iSCSI:
3523 img_optype = OPTYPE_ISCSI_ACTIVE;
3524 break;
3525 case IMAGE_BOOT_CODE:
3526 img_optype = OPTYPE_REDBOOT;
3527 break;
3528 case IMAGE_OPTION_ROM_ISCSI:
3529 img_optype = OPTYPE_BIOS;
3530 break;
3531 case IMAGE_OPTION_ROM_PXE:
3532 img_optype = OPTYPE_PXE_BIOS;
3533 break;
3534 case IMAGE_OPTION_ROM_FCoE:
3535 img_optype = OPTYPE_FCOE_BIOS;
3536 break;
3537 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3538 img_optype = OPTYPE_ISCSI_BACKUP;
3539 break;
3540 case IMAGE_NCSI:
3541 img_optype = OPTYPE_NCSI_FW;
3542 break;
3543 default:
3544 continue;
3545 }
3546
3547 if (img_optype == OPTYPE_REDBOOT) {
3548 redboot = be_flash_redboot(adapter, fw->data,
3549 img_offset, img_size,
3550 filehdr_size + img_hdrs_size);
3551 if (!redboot)
3552 continue;
3553 }
3554
3555 p = fw->data;
3556 p += filehdr_size + img_offset + img_hdrs_size;
3557 if (p + img_size > fw->data + fw->size)
3558 return -1;
3559
3560 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3561 if (status) {
3562 dev_err(&adapter->pdev->dev,
3563 "Flashing section type %d failed.\n",
3564 fsec->fsec_entry[i].type);
3565 return status;
3566 }
3567 }
3568 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003569}
3570
/* Download a firmware image to a Lancer adapter.
 * The image is streamed to the "/prg" object in 32KB chunks via
 * lancer_cmd_write_object(), then committed with a zero-length write.
 * Depending on the reported change_status, the function either triggers
 * an in-band FW reset or informs the user that a reboot is needed.
 * Returns 0 on success or a negative errno / cmd status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object interface requires a 4-byte aligned length */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the request header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by the amount fw actually consumed */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new firmware as the fw indicates */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3666
Sathya Perlaca34fe32012-11-06 17:48:56 +00003667#define UFI_TYPE2 2
3668#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003669#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003670#define UFI_TYPE4 4
3671static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003672 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003673{
3674 if (fhdr == NULL)
3675 goto be_get_ufi_exit;
3676
Sathya Perlaca34fe32012-11-06 17:48:56 +00003677 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3678 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003679 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3680 if (fhdr->asic_type_rev == 0x10)
3681 return UFI_TYPE3R;
3682 else
3683 return UFI_TYPE3;
3684 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003685 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003686
3687be_get_ufi_exit:
3688 dev_err(&adapter->pdev->dev,
3689 "UFI and Interface are not compatible for flashing\n");
3690 return -1;
3691}
3692
/* Download a UFI firmware image to a BE2/BE3/BE3-R/Skyhawk adapter.
 * Determines the UFI type from the file header, then dispatches each
 * image header with imageid == 1 to the generation-specific flashing
 * routine. Type-2 (BE2) UFIs carry no image headers and are flashed
 * once after the loop. Returns 0 on success, negative on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every write-flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* May return -1; handled after the loop below */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* BE2 UFIs have no per-image dispatch; flash them directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3761
3762int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3763{
3764 const struct firmware *fw;
3765 int status;
3766
3767 if (!netif_running(adapter->netdev)) {
3768 dev_err(&adapter->pdev->dev,
3769 "Firmware load not allowed (interface is down)\n");
3770 return -1;
3771 }
3772
3773 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3774 if (status)
3775 goto fw_exit;
3776
3777 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3778
3779 if (lancer_chip(adapter))
3780 status = lancer_fw_download(adapter, fw);
3781 else
3782 status = be_fw_download(adapter, fw);
3783
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003784 if (!status)
3785 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3786 adapter->fw_on_flash);
3787
Ajit Khaparde84517482009-09-04 03:12:16 +00003788fw_exit:
3789 release_firmware(fw);
3790 return status;
3791}
3792
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003793static int be_ndo_bridge_setlink(struct net_device *dev,
3794 struct nlmsghdr *nlh)
3795{
3796 struct be_adapter *adapter = netdev_priv(dev);
3797 struct nlattr *attr, *br_spec;
3798 int rem;
3799 int status = 0;
3800 u16 mode = 0;
3801
3802 if (!sriov_enabled(adapter))
3803 return -EOPNOTSUPP;
3804
3805 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3806
3807 nla_for_each_nested(attr, br_spec, rem) {
3808 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3809 continue;
3810
3811 mode = nla_get_u16(attr);
3812 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3813 return -EINVAL;
3814
3815 status = be_cmd_set_hsw_config(adapter, 0, 0,
3816 adapter->if_handle,
3817 mode == BRIDGE_MODE_VEPA ?
3818 PORT_FWD_TYPE_VEPA :
3819 PORT_FWD_TYPE_VEB);
3820 if (status)
3821 goto err;
3822
3823 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3824 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3825
3826 return status;
3827 }
3828err:
3829 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3830 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3831
3832 return status;
3833}
3834
3835static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3836 struct net_device *dev,
3837 u32 filter_mask)
3838{
3839 struct be_adapter *adapter = netdev_priv(dev);
3840 int status = 0;
3841 u8 hsw_mode;
3842
3843 if (!sriov_enabled(adapter))
3844 return 0;
3845
3846 /* BE and Lancer chips support VEB mode only */
3847 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3848 hsw_mode = PORT_FWD_TYPE_VEB;
3849 } else {
3850 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3851 adapter->if_handle, &hsw_mode);
3852 if (status)
3853 return 0;
3854 }
3855
3856 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3857 hsw_mode == PORT_FWD_TYPE_VEPA ?
3858 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3859}
3860
/* Net device operations exposed to the networking core. */
static const struct net_device_ops be_netdev_ops = {
	/* Basic device lifecycle and datapath */
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	/* VLAN filtering */
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF configuration */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	/* E-switch (VEB/VEPA) bridge mode */
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
};
3882
3883static void be_netdev_init(struct net_device *netdev)
3884{
3885 struct be_adapter *adapter = netdev_priv(netdev);
3886
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003887 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003888 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00003889 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003890 if (be_multi_rxq(adapter))
3891 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003892
3893 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00003894 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003895
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003896 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003897 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003898
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003899 netdev->priv_flags |= IFF_UNICAST_FLT;
3900
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003901 netdev->flags |= IFF_MULTICAST;
3902
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00003903 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003904
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003905 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003906
3907 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003908}
3909
3910static void be_unmap_pci_bars(struct be_adapter *adapter)
3911{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003912 if (adapter->csr)
3913 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003914 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003915 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003916}
3917
/* Doorbell BAR index: 0 for Lancer chips and VFs, 4 otherwise. */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3925
3926static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003927{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003928 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003929 adapter->roce_db.size = 4096;
3930 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3931 db_bar(adapter));
3932 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3933 db_bar(adapter));
3934 }
Parav Pandit045508a2012-03-26 14:27:13 +00003935 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003936}
3937
3938static int be_map_pci_bars(struct be_adapter *adapter)
3939{
3940 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003941 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003942
Sathya Perlace66f782012-11-06 17:48:58 +00003943 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3944 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3945 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003946
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003947 if (BEx_chip(adapter) && be_physfn(adapter)) {
3948 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3949 if (adapter->csr == NULL)
3950 return -ENOMEM;
3951 }
3952
Sathya Perlace66f782012-11-06 17:48:58 +00003953 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003954 if (addr == NULL)
3955 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003956 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003957
3958 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003959 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003960
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003961pci_map_err:
3962 be_unmap_pci_bars(adapter);
3963 return -ENOMEM;
3964}
3965
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003966static void be_ctrl_cleanup(struct be_adapter *adapter)
3967{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003968 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003969
3970 be_unmap_pci_bars(adapter);
3971
3972 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003973 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3974 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003975
Sathya Perla5b8821b2011-08-02 19:57:44 +00003976 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003977 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003978 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3979 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003980}
3981
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003982static int be_ctrl_init(struct be_adapter *adapter)
3983{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003984 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3985 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003986 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003987 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003988 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003989
Sathya Perlace66f782012-11-06 17:48:58 +00003990 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3991 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3992 SLI_INTF_FAMILY_SHIFT;
3993 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3994
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003995 status = be_map_pci_bars(adapter);
3996 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003997 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003998
3999 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004000 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4001 mbox_mem_alloc->size,
4002 &mbox_mem_alloc->dma,
4003 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004004 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004005 status = -ENOMEM;
4006 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004007 }
4008 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4009 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4010 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4011 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004012
Sathya Perla5b8821b2011-08-02 19:57:44 +00004013 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa2013-08-26 22:45:23 -07004014 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4015 rx_filter->size, &rx_filter->dma,
4016 GFP_KERNEL);
Sathya Perla5b8821b2011-08-02 19:57:44 +00004017 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004018 status = -ENOMEM;
4019 goto free_mbox;
4020 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004021
Ivan Vecera29849612010-12-14 05:43:19 +00004022 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004023 spin_lock_init(&adapter->mcc_lock);
4024 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004025
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07004026 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004027 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004028 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004029
4030free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004031 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4032 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004033
4034unmap_pci_bars:
4035 be_unmap_pci_bars(adapter);
4036
4037done:
4038 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004039}
4040
4041static void be_stats_cleanup(struct be_adapter *adapter)
4042{
Sathya Perla3abcded2010-10-03 22:12:27 -07004043 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004044
4045 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004046 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4047 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004048}
4049
4050static int be_stats_init(struct be_adapter *adapter)
4051{
Sathya Perla3abcded2010-10-03 22:12:27 -07004052 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004053
Sathya Perlaca34fe32012-11-06 17:48:56 +00004054 if (lancer_chip(adapter))
4055 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4056 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004057 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004058 else
4059 /* BE3 and Skyhawk */
4060 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4061
Joe Perchesede23fa2013-08-26 22:45:23 -07004062 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4063 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004064 if (cmd->va == NULL)
4065 return -1;
4066 return 0;
4067}
4068
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe. The sequence matters: stop RoCE and interrupts, cancel the
 * recovery worker, unregister the netdev (stops the datapath), then
 * release fw/DMA/PCI resources, and finally free the netdev.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Ensure no recovery work runs after teardown starts */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4100
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004101bool be_is_wol_supported(struct be_adapter *adapter)
4102{
4103 return ((adapter->wol_cap & BE_WOL_CAP) &&
4104 !be_is_wol_excluded(adapter)) ? true : false;
4105}
4106
Somnath Kotur941a77d2012-05-17 22:59:03 +00004107u32 be_get_fw_log_level(struct be_adapter *adapter)
4108{
4109 struct be_dma_mem extfat_cmd;
4110 struct be_fat_conf_params *cfgs;
4111 int status;
4112 u32 level = 0;
4113 int j;
4114
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004115 if (lancer_chip(adapter))
4116 return 0;
4117
Somnath Kotur941a77d2012-05-17 22:59:03 +00004118 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4119 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4120 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4121 &extfat_cmd.dma);
4122
4123 if (!extfat_cmd.va) {
4124 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4125 __func__);
4126 goto err;
4127 }
4128
4129 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4130 if (!status) {
4131 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4132 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00004133 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00004134 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4135 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4136 }
4137 }
4138 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4139 extfat_cmd.dma);
4140err:
4141 return level;
4142}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004143
/* Read one-time configuration from the fw after it has become ready:
 * controller attributes, WOL capability, fw log level and the default
 * number of RSS queues. Returns 0 or a negative command status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable driver HW messages only when the fw's own UART trace
	 * level is at or below the default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4173
/* Attempt to recover a Lancer function after a hw error: wait for the
 * chip to report ready again, then rebuild the function from scratch
 * (be_clear + be_setup) and re-open the netdev if it was running.
 * Returns 0 on success; -EAGAIN means fw resources are not yet
 * provisioned and the caller may retry later.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear sticky error state before re-initializing */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}
4210
/* Periodic (1s) worker that polls for hw errors and, on Lancer chips,
 * drives the error-recovery sequence. The netdev is detached under
 * rtnl while recovery runs and re-attached only on success.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4237
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes fw statistics, reads die temperature,
 * replenishes starved RX rings and updates adaptive EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects bottom halves disabled */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't fire a new stats cmd while one is still outstanding */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature is queried only every be_get_temp_freq ticks,
	 * and only on the PF */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* repost RX buffers on rings that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4281
Sathya Perla257a3fe2013-06-14 15:54:51 +05304282/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004283static bool be_reset_required(struct be_adapter *adapter)
4284{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304285 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004286}
4287
Sathya Perlad3791422012-09-28 04:39:44 +00004288static char *mc_name(struct be_adapter *adapter)
4289{
4290 if (adapter->function_mode & FLEX10_MODE)
4291 return "FLEX10";
4292 else if (adapter->function_mode & VNIC_MODE)
4293 return "vNIC";
4294 else if (adapter->function_mode & UMC_ENABLED)
4295 return "UMC";
4296 else
4297 return "";
4298}
4299
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4304
/* PCI probe() callback: brings up one NIC function. The sequence is
 * order-critical: enable the device and BARs, allocate the netdev,
 * configure DMA masks, initialize the mailbox (be_ctrl_init), wait for
 * fw readiness, optionally FLR the function, then create queues/rings
 * (be_setup) and finally register the netdev. Failures unwind through
 * the goto ladder at the bottom in reverse order of acquisition.
 * Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled on it */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	/* return status ignored: port_name stays unset on failure and is
	 * only used for the info log below */
	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4433
/* Legacy PM suspend callback: arm WOL if enabled, stop the recovery
 * worker, close the interface and tear down queues, then put the
 * device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4457
/* Legacy PM resume callback: re-enable the PCI device, wait for fw
 * readiness, rebuild the function (be_setup), re-open the interface
 * if it was running, restart the recovery worker and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are ignored
	 * here; resume proceeds best-effort */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4498
/*
 * Shutdown callback. An FLR (via be_cmd_reset_function) will stop BE
 * from DMAing any data, which is required before the system powers off
 * or kexecs.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4518
/* EEH error_detected callback: quiesce the function (close netdev,
 * free queues) exactly once per error, then tell the EEH core whether
 * a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* eeh_error guards against tearing down twice if this callback
	 * fires more than once for the same error */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4557
/* EEH slot_reset callback: re-enable the device after the slot has
 * been reset and wait for the fw to become ready again. Returns
 * RECOVERED so the EEH core proceeds to be_eeh_resume(), or
 * DISCONNECT if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* clear sticky driver error state now that the hw is back */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4584
/* EEH resume callback: rebuild the function after a successful slot
 * reset — FLR, fw init, be_setup, re-open the netdev if it was running
 * and restart the recovery worker. On any failure the device is left
 * detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4621
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4627
/* PCI driver registration: probe/remove, legacy PM hooks, shutdown
 * and error-recovery callbacks for all supported device IDs.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4638
4639static int __init be_init_module(void)
4640{
Joe Perches8e95a202009-12-03 07:58:21 +00004641 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4642 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004643 printk(KERN_WARNING DRV_NAME
4644 " : Module param rx_frag_size must be 2048/4096/8192."
4645 " Using 2048\n");
4646 rx_frag_size = 2048;
4647 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004648
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004649 return pci_register_driver(&be_driver);
4650}
4651module_init(be_init_module);
4652
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}