blob: 0a168e3d47ab1603e99cba4f53805c1b97f9b5a3 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070025
26MODULE_VERSION(DRV_VER);
27MODULE_DEVICE_TABLE(pci, be_dev_ids);
28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000029MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030MODULE_LICENSE("GPL");
31
/* Number of PCI virtual functions to enable (SR-IOV); 0 disables VFs.
 * Read-only in sysfs (S_IRUGO) — can only be set at module load time.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
/* Size (bytes) of each RX buffer fragment posted to the HW; default 2048.
 * Read-only in sysfs — fixed for the lifetime of the module.
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
/* PCI IDs this driver claims: BladeEngine devices under BE_VENDOR_ID and
 * OneConnect/Skyhawk devices under EMULEX_VENDOR_ID.  The { 0 } entry
 * terminates the table.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: block names indexed by bit position in the register.
 * Strings are emitted verbatim when an unrecoverable error is reported
 * (the trailing spaces in some entries are part of the original strings
 * and are preserved deliberately).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: block names indexed by bit position in the register.
 * Unnamed/reserved bits report as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700122
Sathya Perla752961a2011-10-24 02:45:03 +0000123/* Is BE in a multi-channel mode */
124static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128}
129
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000136 mem->va = NULL;
137 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138}
139
140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142{
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000152 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153 return 0;
154}
155
Somnath Kotur68c45a22013-03-14 02:42:07 +0000156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Sathya Perladb3ea782011-08-22 19:41:52 +0000160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173}
174
Somnath Kotur68c45a22013-03-14 02:42:07 +0000175static void be_intr_set(struct be_adapter *adapter, bool enable)
176{
177 int status = 0;
178
179 /* On lancer interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189}
190
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192{
193 u32 val = 0;
194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000207
208 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210}
211
Sathya Perla8788fdc2009-07-27 22:52:03 +0000212static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 bool arm, bool clear_int, u16 num_popped)
214{
215 u32 val = 0;
216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
218 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000219
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000220 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000221 return;
222
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700223 if (arm)
224 val |= 1 << DB_EQ_REARM_SHIFT;
225 if (clear_int)
226 val |= 1 << DB_EQ_CLR_SHIFT;
227 val |= 1 << DB_EQ_EVNT_SHIFT;
228 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233{
234 u32 val = 0;
235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
/* ndo_set_mac_address handler.
 *
 * Programs the requested MAC via a FW PMAC_ADD command and then queries the
 * FW for the MAC that is actually active before committing the change to
 * netdev->dev_addr.  This two-step dance is needed on VFs, where the PF may
 * (or may not) have pre-provisioned the MAC and the VF may lack the FILTMGMT
 * privilege.  Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	/* remember the currently programmed pmac so it can be deleted later */
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
301
Sathya Perlaca34fe32012-11-06 17:48:56 +0000302/* BE2 supports only v0 cmd */
303static void *hw_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307
308 return &cmd->hw_stats;
309 } else {
310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311
312 return &cmd->hw_stats;
313 }
314}
315
316/* BE2 supports only v0 cmd */
317static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
318{
319 if (BE2_chip(adapter)) {
320 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
321
322 return &hw_stats->erx;
323 } else {
324 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
325
326 return &hw_stats->erx;
327 }
328}
329
/* Copy the v0-layout HW stats (BE2) into the driver's chip-independent
 * drv_stats structure, after converting from LE to CPU byte order.
 * Per-port counters come from the port entry matching adapter->port_num;
 * jabber events are per-port fields in the rxf block.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan filtering; the driver reports their sum */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
378
/* Copy the v1-layout HW stats (BE3/Skyhawk) into the driver's
 * chip-independent drv_stats structure, after converting from LE to CPU
 * byte order.  v1 adds per-port counters that v0 lacks (priority pause,
 * pmem fifo overflow, per-port jabber events, address-filtered total).
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
424
/* Copy Lancer per-physical-port (pport) stats into the driver's
 * chip-independent drv_stats structure, after converting from LE to CPU
 * byte order.  Many Lancer counters are 64-bit; only the low 32 bits
 * (the *_lo fields) are folded into the 32-bit drv_stats counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	/* Lancer reports a single fifo-overflow counter; it feeds both
	 * driver fifo-overflow stats below.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000463
Sathya Perla09c1c682011-08-22 19:41:53 +0000464static void accumulate_16bit_val(u32 *acc, u16 val)
465{
466#define lo(x) (x & 0xFFFF)
467#define hi(x) (x & 0xFFFF0000)
468 bool wrapped = val < lo(*acc);
469 u32 newacc = hi(*acc) + val;
470
471 if (wrapped)
472 newacc += 65536;
473 ACCESS_ONCE(*acc) = newacc;
474}
475
Jingoo Han4188e7d2013-08-05 18:02:02 +0900476static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000477 struct be_rx_obj *rxo,
478 u32 erx_stat)
479{
480 if (!BEx_chip(adapter))
481 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
482 else
483 /* below erx HW counter can actually wrap around after
484 * 65535. Driver accumulates a 32-bit value
485 */
486 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
487 (u16)erx_stat);
488}
489
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000490void be_parse_stats(struct be_adapter *adapter)
491{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000492 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
493 struct be_rx_obj *rxo;
494 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000495 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000496
Sathya Perlaca34fe32012-11-06 17:48:56 +0000497 if (lancer_chip(adapter)) {
498 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000499 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000500 if (BE2_chip(adapter))
501 populate_be_v0_stats(adapter);
502 else
503 /* for BE3 and Skyhawk */
504 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000505
Sathya Perlaca34fe32012-11-06 17:48:56 +0000506 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
507 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000508 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
509 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000510 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000511 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000512}
513
Sathya Perlaab1594e2011-07-25 19:10:15 +0000514static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
515 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700516{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000517 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000518 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700519 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000520 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000521 u64 pkts, bytes;
522 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700523 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700524
Sathya Perla3abcded2010-10-03 22:12:27 -0700525 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000526 const struct be_rx_stats *rx_stats = rx_stats(rxo);
527 do {
528 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
529 pkts = rx_stats(rxo)->rx_pkts;
530 bytes = rx_stats(rxo)->rx_bytes;
531 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
532 stats->rx_packets += pkts;
533 stats->rx_bytes += bytes;
534 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
535 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
536 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700537 }
538
Sathya Perla3c8def92011-06-12 20:01:58 +0000539 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000540 const struct be_tx_stats *tx_stats = tx_stats(txo);
541 do {
542 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
543 pkts = tx_stats(txo)->tx_pkts;
544 bytes = tx_stats(txo)->tx_bytes;
545 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
546 stats->tx_packets += pkts;
547 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000548 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700549
550 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000551 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552 drvs->rx_alignment_symbol_errors +
553 drvs->rx_in_range_errors +
554 drvs->rx_out_range_errors +
555 drvs->rx_frame_too_long +
556 drvs->rx_dropped_too_small +
557 drvs->rx_dropped_too_short +
558 drvs->rx_dropped_header_too_small +
559 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000560 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000563 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000564 drvs->rx_out_range_errors +
565 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000566
Sathya Perlaab1594e2011-07-25 19:10:15 +0000567 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700568
569 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000570 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000571
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700572 /* receiver fifo overrun */
573 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000574 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000575 drvs->rx_input_fifo_overflow_drop +
576 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000577 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700578}
579
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000580void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700582 struct net_device *netdev = adapter->netdev;
583
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000584 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000585 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000586 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000588
589 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
590 netif_carrier_on(netdev);
591 else
592 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700593}
594
Sathya Perla3c8def92011-06-12 20:01:58 +0000595static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000596 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700597{
Sathya Perla3c8def92011-06-12 20:01:58 +0000598 struct be_tx_stats *stats = tx_stats(txo);
599
Sathya Perlaab1594e2011-07-25 19:10:15 +0000600 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000601 stats->tx_reqs++;
602 stats->tx_wrbs += wrb_cnt;
603 stats->tx_bytes += copied;
604 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700605 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000606 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000607 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700608}
609
610/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000611static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
612 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700614 int cnt = (skb->len > skb->data_len);
615
616 cnt += skb_shinfo(skb)->nr_frags;
617
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700618 /* to account for hdr wrb */
619 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000620 if (lancer_chip(adapter) || !(cnt & 1)) {
621 *dummy = false;
622 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700623 /* add a dummy to make it an even num */
624 cnt++;
625 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000626 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700627 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
628 return cnt;
629}
630
631static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
632{
633 wrb->frag_pa_hi = upper_32_bits(addr);
634 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
635 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000636 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700637}
638
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000639static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
640 struct sk_buff *skb)
641{
642 u8 vlan_prio;
643 u16 vlan_tag;
644
645 vlan_tag = vlan_tx_tag_get(skb);
646 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
647 /* If vlan priority provided by OS is NOT in available bmap */
648 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
649 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
650 adapter->recommended_prio;
651
652 return vlan_tag;
653}
654
Somnath Koturcc4ce022010-10-21 07:11:14 -0700655static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000656 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000658 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700659
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700660 memset(hdr, 0, sizeof(*hdr));
661
662 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
663
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000664 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
666 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
667 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000668 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000669 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
671 if (is_tcp_pkt(skb))
672 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
673 else if (is_udp_pkt(skb))
674 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
675 }
676
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700677 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000679 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700680 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681 }
682
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000683 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
687 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
688}
689
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000690static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000691 bool unmap_single)
692{
693 dma_addr_t dma;
694
695 be_dws_le_to_cpu(wrb, sizeof(*wrb));
696
697 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000698 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000699 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000700 dma_unmap_single(dev, dma, wrb->frag_len,
701 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000702 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000703 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000704 }
705}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706
Sathya Perla3c8def92011-06-12 20:01:58 +0000707static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000708 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
709 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700710{
Sathya Perla7101e112010-03-22 20:41:12 +0000711 dma_addr_t busaddr;
712 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000713 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700714 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700715 struct be_eth_wrb *wrb;
716 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000717 bool map_single = false;
718 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700720 hdr = queue_head_node(txq);
721 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000722 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700723
David S. Millerebc8d2a2009-06-09 01:01:31 -0700724 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700725 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000726 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
727 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000728 goto dma_err;
729 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700730 wrb = queue_head_node(txq);
731 wrb_fill(wrb, busaddr, len);
732 be_dws_cpu_to_le(wrb, sizeof(*wrb));
733 queue_head_inc(txq);
734 copied += len;
735 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736
David S. Millerebc8d2a2009-06-09 01:01:31 -0700737 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000738 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700739 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000740 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000741 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000742 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000743 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700744 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000745 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700746 be_dws_cpu_to_le(wrb, sizeof(*wrb));
747 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000748 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700749 }
750
751 if (dummy_wrb) {
752 wrb = queue_head_node(txq);
753 wrb_fill(wrb, 0, 0);
754 be_dws_cpu_to_le(wrb, sizeof(*wrb));
755 queue_head_inc(txq);
756 }
757
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000758 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700759 be_dws_cpu_to_le(hdr, sizeof(*hdr));
760
761 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000762dma_err:
763 txq->head = map_head;
764 while (copied) {
765 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000767 map_single = false;
768 copied -= wrb->frag_len;
769 queue_head_inc(txq);
770 }
771 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772}
773
Somnath Kotur93040ae2012-06-26 22:32:10 +0000774static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000775 struct sk_buff *skb,
776 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000777{
778 u16 vlan_tag = 0;
779
780 skb = skb_share_check(skb, GFP_ATOMIC);
781 if (unlikely(!skb))
782 return skb;
783
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000784 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000785 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530786
787 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
788 if (!vlan_tag)
789 vlan_tag = adapter->pvid;
790 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
791 * skip VLAN insertion
792 */
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
795 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000796
797 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400798 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000799 if (unlikely(!skb))
800 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000801 skb->vlan_tci = 0;
802 }
803
804 /* Insert the outer VLAN, if any */
805 if (adapter->qnq_vid) {
806 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400807 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000808 if (unlikely(!skb))
809 return skb;
810 if (skip_hw_vlan)
811 *skip_hw_vlan = true;
812 }
813
Somnath Kotur93040ae2012-06-26 22:32:10 +0000814 return skb;
815}
816
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000817static bool be_ipv6_exthdr_check(struct sk_buff *skb)
818{
819 struct ethhdr *eh = (struct ethhdr *)skb->data;
820 u16 offset = ETH_HLEN;
821
822 if (eh->h_proto == htons(ETH_P_IPV6)) {
823 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
824
825 offset += sizeof(struct ipv6hdr);
826 if (ip6h->nexthdr != NEXTHDR_TCP &&
827 ip6h->nexthdr != NEXTHDR_UDP) {
828 struct ipv6_opt_hdr *ehdr =
829 (struct ipv6_opt_hdr *) (skb->data + offset);
830
831 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
832 if (ehdr->hdrlen == 0xff)
833 return true;
834 }
835 }
836 return false;
837}
838
839static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
840{
841 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
842}
843
Sathya Perlaee9c7992013-05-22 23:04:55 +0000844static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
845 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000846{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000847 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000848}
849
Sathya Perlaee9c7992013-05-22 23:04:55 +0000850static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
851 struct sk_buff *skb,
852 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000854 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000855 unsigned int eth_hdr_len;
856 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000857
Somnath Kotur48265662013-05-26 21:08:47 +0000858 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
859 * may cause a transmit stall on that port. So the work-around is to
860 * pad such packets to a 36-byte length.
861 */
862 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
863 if (skb_padto(skb, 36))
864 goto tx_drop;
865 skb->len = 36;
866 }
867
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000868 /* For padded packets, BE HW modifies tot_len field in IP header
869 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000870 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000871 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000872 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
873 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000874 if (skb->len <= 60 &&
875 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000876 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000877 ip = (struct iphdr *)ip_hdr(skb);
878 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
879 }
880
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000881 /* If vlan tag is already inlined in the packet, skip HW VLAN
882 * tagging in UMC mode
883 */
884 if ((adapter->function_mode & UMC_ENABLED) &&
885 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000886 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000887
Somnath Kotur93040ae2012-06-26 22:32:10 +0000888 /* HW has a bug wherein it will calculate CSUM for VLAN
889 * pkts even though it is disabled.
890 * Manually insert VLAN in pkt.
891 */
892 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000893 vlan_tx_tag_present(skb)) {
894 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000895 if (unlikely(!skb))
896 goto tx_drop;
897 }
898
899 /* HW may lockup when VLAN HW tagging is requested on
900 * certain ipv6 packets. Drop such pkts if the HW workaround to
901 * skip HW tagging is not enabled by FW.
902 */
903 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000904 (adapter->pvid || adapter->qnq_vid) &&
905 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000906 goto tx_drop;
907
908 /* Manual VLAN tag insertion to prevent:
909 * ASIC lockup when the ASIC inserts VLAN tag into
910 * certain ipv6 packets. Insert VLAN tags in driver,
911 * and set event, completion, vlan bits accordingly
912 * in the Tx WRB.
913 */
914 if (be_ipv6_tx_stall_chk(adapter, skb) &&
915 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000916 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000917 if (unlikely(!skb))
918 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000919 }
920
Sathya Perlaee9c7992013-05-22 23:04:55 +0000921 return skb;
922tx_drop:
923 dev_kfree_skb_any(skb);
924 return NULL;
925}
926
/* ndo_start_xmit handler: apply HW workarounds, build the WRB chain, and
 * ring the TX doorbell. Always returns NETDEV_TX_OK — dropped pkts are
 * freed here and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rollback point if WRB mapping fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround path already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: WRBs already rolled back; drop the pkt */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
975
976static int be_change_mtu(struct net_device *netdev, int new_mtu)
977{
978 struct be_adapter *adapter = netdev_priv(netdev);
979 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000980 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
981 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700982 dev_info(&adapter->pdev->dev,
983 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000984 BE_MIN_MTU,
985 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700986 return -EINVAL;
987 }
988 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
989 netdev->mtu, new_mtu);
990 netdev->mtu = new_mtu;
991 return 0;
992}
993
994/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000995 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
996 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700997 */
Sathya Perla10329df2012-06-05 19:37:18 +0000998static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700999{
Sathya Perla10329df2012-06-05 19:37:18 +00001000 u16 vids[BE_NUM_VLANS_SUPPORTED];
1001 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001002 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001003
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001004 /* No need to further configure vids if in promiscuous mode */
1005 if (adapter->promiscuous)
1006 return 0;
1007
Sathya Perla92bf14a2013-08-27 16:57:32 +05301008 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001009 goto set_vlan_promisc;
1010
1011 /* Construct VLAN Table to give to HW */
1012 for (i = 0; i < VLAN_N_VID; i++)
1013 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +00001014 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001015
1016 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +00001017 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001018
1019 /* Set to VLAN promisc mode as setting VLAN filter failed */
1020 if (status) {
1021 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1022 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
1023 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001024 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001025
Sathya Perlab31c50a2009-09-17 10:30:13 -07001026 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001027
1028set_vlan_promisc:
1029 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1030 NULL, 0, 1, 1);
1031 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001032}
1033
Patrick McHardy80d5c362013-04-19 02:04:28 +00001034static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001035{
1036 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001037 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001039 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001040 status = -EINVAL;
1041 goto ret;
1042 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001043
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001044 /* Packets with VID 0 are always received by Lancer by default */
1045 if (lancer_chip(adapter) && vid == 0)
1046 goto ret;
1047
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301049 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001050 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001051
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001052 if (!status)
1053 adapter->vlans_added++;
1054 else
1055 adapter->vlan_tag[vid] = 0;
1056ret:
1057 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058}
1059
Patrick McHardy80d5c362013-04-19 02:04:28 +00001060static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001061{
1062 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001063 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001064
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001065 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001066 status = -EINVAL;
1067 goto ret;
1068 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001069
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001070 /* Packets with VID 0 are always received by Lancer by default */
1071 if (lancer_chip(adapter) && vid == 0)
1072 goto ret;
1073
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001074 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301075 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001076 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001077
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001078 if (!status)
1079 adapter->vlans_added--;
1080 else
1081 adapter->vlan_tag[vid] = 1;
1082ret:
1083 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001084}
1085
Sathya Perlaa54769f2011-10-24 02:45:00 +00001086static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001087{
1088 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001089 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001090
1091 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001092 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001093 adapter->promiscuous = true;
1094 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001095 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001096
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001097 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001098 if (adapter->promiscuous) {
1099 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001100 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001101
1102 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001103 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001104 }
1105
Sathya Perlae7b909a2009-11-22 22:01:10 +00001106 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001107 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla92bf14a2013-08-27 16:57:32 +05301108 netdev_mc_count(netdev) > be_max_mc(adapter)) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001109 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001110 goto done;
1111 }
1112
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001113 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1114 struct netdev_hw_addr *ha;
1115 int i = 1; /* First slot is claimed by the Primary MAC */
1116
1117 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1118 be_cmd_pmac_del(adapter, adapter->if_handle,
1119 adapter->pmac_id[i], 0);
1120 }
1121
Sathya Perla92bf14a2013-08-27 16:57:32 +05301122 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001123 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1124 adapter->promiscuous = true;
1125 goto done;
1126 }
1127
1128 netdev_for_each_uc_addr(ha, adapter->netdev) {
1129 adapter->uc_macs++; /* First slot is for Primary MAC */
1130 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1131 adapter->if_handle,
1132 &adapter->pmac_id[adapter->uc_macs], 0);
1133 }
1134 }
1135
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001136 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1137
1138 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1139 if (status) {
1140 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1141 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1142 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1143 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001144done:
1145 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146}
1147
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001148static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1149{
1150 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001151 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001152 int status;
1153
Sathya Perla11ac75e2011-12-13 00:58:50 +00001154 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001155 return -EPERM;
1156
Sathya Perla11ac75e2011-12-13 00:58:50 +00001157 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001158 return -EINVAL;
1159
Sathya Perla3175d8c2013-07-23 15:25:03 +05301160 if (BEx_chip(adapter)) {
1161 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1162 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001163
Sathya Perla11ac75e2011-12-13 00:58:50 +00001164 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1165 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301166 } else {
1167 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1168 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001169 }
1170
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001171 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001172 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1173 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001174 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001175 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001176
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001177 return status;
1178}
1179
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001180static int be_get_vf_config(struct net_device *netdev, int vf,
1181 struct ifla_vf_info *vi)
1182{
1183 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001184 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001185
Sathya Perla11ac75e2011-12-13 00:58:50 +00001186 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001187 return -EPERM;
1188
Sathya Perla11ac75e2011-12-13 00:58:50 +00001189 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001190 return -EINVAL;
1191
1192 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001193 vi->tx_rate = vf_cfg->tx_rate;
1194 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001195 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001196 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001197
1198 return 0;
1199}
1200
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001201static int be_set_vf_vlan(struct net_device *netdev,
1202 int vf, u16 vlan, u8 qos)
1203{
1204 struct be_adapter *adapter = netdev_priv(netdev);
1205 int status = 0;
1206
Sathya Perla11ac75e2011-12-13 00:58:50 +00001207 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001208 return -EPERM;
1209
Sathya Perla11ac75e2011-12-13 00:58:50 +00001210 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001211 return -EINVAL;
1212
1213 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001214 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1215 /* If this is new value, program it. Else skip. */
1216 adapter->vf_cfg[vf].vlan_tag = vlan;
1217
1218 status = be_cmd_set_hsw_config(adapter, vlan,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05001219 vf + 1, adapter->vf_cfg[vf].if_handle, 0);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001220 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001221 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001222 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001223 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001224 vlan = adapter->vf_cfg[vf].def_vid;
1225 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05001226 adapter->vf_cfg[vf].if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001227 }
1228
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001229
1230 if (status)
1231 dev_info(&adapter->pdev->dev,
1232 "VLAN %d config on VF %d failed\n", vlan, vf);
1233 return status;
1234}
1235
Ajit Khapardee1d18732010-07-23 01:52:13 +00001236static int be_set_vf_tx_rate(struct net_device *netdev,
1237 int vf, int rate)
1238{
1239 struct be_adapter *adapter = netdev_priv(netdev);
1240 int status = 0;
1241
Sathya Perla11ac75e2011-12-13 00:58:50 +00001242 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001243 return -EPERM;
1244
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001245 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001246 return -EINVAL;
1247
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001248 if (rate < 100 || rate > 10000) {
1249 dev_err(&adapter->pdev->dev,
1250 "tx rate must be between 100 and 10000 Mbps\n");
1251 return -EINVAL;
1252 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001253
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001254 if (lancer_chip(adapter))
1255 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1256 else
1257 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001258
1259 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001260 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001261 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001262 else
1263 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001264 return status;
1265}
1266
Sathya Perla2632baf2013-10-01 16:00:00 +05301267static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1268 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001269{
Sathya Perla2632baf2013-10-01 16:00:00 +05301270 aic->rx_pkts_prev = rx_pkts;
1271 aic->tx_reqs_prev = tx_pkts;
1272 aic->jiffies = now;
1273}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001274
Sathya Perla2632baf2013-10-01 16:00:00 +05301275static void be_eqd_update(struct be_adapter *adapter)
1276{
1277 struct be_set_eqd set_eqd[MAX_EVT_QS];
1278 int eqd, i, num = 0, start;
1279 struct be_aic_obj *aic;
1280 struct be_eq_obj *eqo;
1281 struct be_rx_obj *rxo;
1282 struct be_tx_obj *txo;
1283 u64 rx_pkts, tx_pkts;
1284 ulong now;
1285 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001286
Sathya Perla2632baf2013-10-01 16:00:00 +05301287 for_all_evt_queues(adapter, eqo, i) {
1288 aic = &adapter->aic_obj[eqo->idx];
1289 if (!aic->enable) {
1290 if (aic->jiffies)
1291 aic->jiffies = 0;
1292 eqd = aic->et_eqd;
1293 goto modify_eqd;
1294 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001295
Sathya Perla2632baf2013-10-01 16:00:00 +05301296 rxo = &adapter->rx_obj[eqo->idx];
1297 do {
1298 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1299 rx_pkts = rxo->stats.rx_pkts;
1300 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001301
Sathya Perla2632baf2013-10-01 16:00:00 +05301302 txo = &adapter->tx_obj[eqo->idx];
1303 do {
1304 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1305 tx_pkts = txo->stats.tx_reqs;
1306 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001307
Sathya Perla4097f662009-03-24 16:40:13 -07001308
Sathya Perla2632baf2013-10-01 16:00:00 +05301309 /* Skip, if wrapped around or first calculation */
1310 now = jiffies;
1311 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1312 rx_pkts < aic->rx_pkts_prev ||
1313 tx_pkts < aic->tx_reqs_prev) {
1314 be_aic_update(aic, rx_pkts, tx_pkts, now);
1315 continue;
1316 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001317
Sathya Perla2632baf2013-10-01 16:00:00 +05301318 delta = jiffies_to_msecs(now - aic->jiffies);
1319 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1320 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1321 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001322
Sathya Perla2632baf2013-10-01 16:00:00 +05301323 if (eqd < 8)
1324 eqd = 0;
1325 eqd = min_t(u32, eqd, aic->max_eqd);
1326 eqd = max_t(u32, eqd, aic->min_eqd);
1327
1328 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001329modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301330 if (eqd != aic->prev_eqd) {
1331 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1332 set_eqd[num].eq_id = eqo->q.id;
1333 aic->prev_eqd = eqd;
1334 num++;
1335 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001336 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301337
1338 if (num)
1339 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001340}
1341
Sathya Perla3abcded2010-10-03 22:12:27 -07001342static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001343 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001344{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001345 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001346
Sathya Perlaab1594e2011-07-25 19:10:15 +00001347 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001348 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001349 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001350 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001351 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001352 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001353 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001354 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001355 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356}
1357
Sathya Perla2e588f82011-03-11 02:49:26 +00001358static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001359{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001360 /* L4 checksum is not reliable for non TCP/UDP packets.
1361 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001362 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1363 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001364}
1365
/* Claim the page_info entry for the rx frag at @frag_idx and account for
 * one consumed rx buffer. Each big page is split into several frags; the
 * DMA mapping covers the whole page and is torn down only when the frag
 * flagged as last_page_user is consumed.
 * Returns the page_info entry; caller owns the page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted frag must always have a backing page */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		/* Last frag of this big page: unmap the whole page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1386
1387/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001388static void be_rx_compl_discard(struct be_rx_obj *rxo,
1389 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390{
Sathya Perla3abcded2010-10-03 22:12:27 -07001391 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001393 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001394
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001395 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001396 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001397 put_page(page_info->page);
1398 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001399 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400 }
1401}
1402
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny frames (<= BE_HDR_LEN) are copied fully into the skb's linear
 * area and the page is released. Larger frames get only the Ethernet
 * header copied; the rest is attached as page frags, with consecutive
 * rx frags from the same physical page coalesced into one skb frag.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the payload stays in the page as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag frame: HW must have reported exactly one frag */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: the skb already
			 * holds a reference, drop the extra one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1479
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001480/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001481static void be_rx_compl_process(struct be_rx_obj *rxo,
1482 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001484 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001485 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001487
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001488 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001489 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001490 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001491 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492 return;
1493 }
1494
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001495 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001497 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001498 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001499 else
1500 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001501
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001502 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001503 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001504 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001505 skb->rxhash = rxcp->rss_hash;
1506
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507
Jiri Pirko343e43c2011-08-25 02:50:51 +00001508 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001509 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001510
1511 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512}
1513
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the rx frags directly to the napi-provided skb (no copy) and
 * feed it to GRO. GRO path implies HW-verified checksums, hence
 * CHECKSUM_UNNECESSARY is set unconditionally.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame and free its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* j starts at -1 so the first iteration (i == 0) opens frag 0 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1570
/* Decode a v1 (BE3 native mode) rx completion entry into the driver's
 * chip-independent be_rx_compl_info.
 * NOTE(review): unlike the v0 parser, this one does not extract ip_frag;
 * rxcp->ip_frag is then whatever was left in the reused rxcp struct —
 * presumably always 0 on be3_native adapters, but confirm against the
 * v1 completion layout.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are valid only when the vtp (vlanf) bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001602
/* Decode a v0 (legacy / non-native mode) rx completion entry into the
 * driver's chip-independent be_rx_compl_info. Same fields as the v1
 * parser, plus ip_frag which be_rx_compl_get() uses to invalidate the
 * L4 checksum for IP fragments.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are valid only when the vtp (vlanf) bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1636
/* Fetch the next valid rx completion from the CQ, parse it into the
 * per-ring rxcp scratch struct and apply vlan quirk fixups.
 * Returns NULL when no new completion is available.
 * The entry's valid dword is cleared after parsing so it is consumed
 * exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the vlan indication for the port-pvid when that
		 * vid is not in the driver's configured vlan list.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1679
Eric Dumazet1829b082011-03-01 05:48:12 +00001680static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001683
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001685 gfp |= __GFP_COMP;
1686 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687}
1688
1689/*
1690 * Allocate a page, split it to fragments of size rx_frag_size and post as
1691 * receive buffers to BE
1692 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001693static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001694{
Sathya Perla3abcded2010-10-03 22:12:27 -07001695 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001696 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001697 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698 struct page *pagep = NULL;
1699 struct be_eth_rx_d *rxd;
1700 u64 page_dmaaddr = 0, frag_dmaaddr;
1701 u32 posted, page_offset = 0;
1702
Sathya Perla3abcded2010-10-03 22:12:27 -07001703 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1705 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001706 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001707 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001708 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709 break;
1710 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001711 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1712 0, adapter->big_page_size,
1713 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714 page_info->page_offset = 0;
1715 } else {
1716 get_page(pagep);
1717 page_info->page_offset = page_offset + rx_frag_size;
1718 }
1719 page_offset = page_info->page_offset;
1720 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001721 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1723
1724 rxd = queue_head_node(rxq);
1725 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1726 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727
1728 /* Any space left in the current big page for another frag? */
1729 if ((page_offset + rx_frag_size + rx_frag_size) >
1730 adapter->big_page_size) {
1731 pagep = NULL;
1732 page_info->last_page_user = true;
1733 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001734
1735 prev_page_info = page_info;
1736 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001737 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001738 }
1739 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001740 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001741
1742 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001744 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001745 } else if (atomic_read(&rxq->used) == 0) {
1746 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001747 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001748 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001749}
1750
Sathya Perla5fb379e2009-06-18 00:02:59 +00001751static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1754
1755 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1756 return NULL;
1757
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001758 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001759 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1760
1761 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1762
1763 queue_tail_inc(tx_cq);
1764 return txcp;
1765}
1766
/* Reclaim the wrbs of one completed tx request ending at @last_index:
 * unmap its DMA buffers, advance the txq tail past all of its wrbs and
 * free the skb. Returns the number of wrbs reclaimed (including the
 * header wrb) so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb may map the skb's linear header area;
		 * unmap_skb_hdr tells unmap_tx_frag to handle that once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1798
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001799/* Return the number of events in the event queue */
1800static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001801{
1802 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001803 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001804
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001805 do {
1806 eqe = queue_tail_node(&eqo->q);
1807 if (eqe->evt == 0)
1808 break;
1809
1810 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001811 eqe->evt = 0;
1812 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001813 queue_tail_inc(&eqo->q);
1814 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001815
1816 return num;
1817}
1818
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001819/* Leaves the EQ is disarmed state */
1820static void be_eq_clean(struct be_eq_obj *eqo)
1821{
1822 int num = events_get(eqo);
1823
1824 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1825}
1826
/* Drain an rx ring during teardown: consume (and discard) all pending
 * completions, wait for the HW flush completion, then free every posted
 * rx buffer that was never used and reset the ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Bound the wait: ~10ms max, or bail out early if
			 * the HW is in an error state.
			 */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1877
/* Drain all tx rings during teardown: reap tx completions for up to
 * ~200ms, then forcibly free any posted tx requests whose completions
 * never arrived so no skbs or DMA mappings are leaked.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the reaped completions, without rearm */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this request's wrb span from the skb
			 * to find its last wrb index.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1936
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001937static void be_evt_queues_destroy(struct be_adapter *adapter)
1938{
1939 struct be_eq_obj *eqo;
1940 int i;
1941
1942 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001943 if (eqo->q.created) {
1944 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001945 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05301946 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001947 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001948 be_queue_free(adapter, &eqo->q);
1949 }
1950}
1951
/* Allocate and create the event queues and their NAPI contexts.
 *
 * The EQ count is capped by both the number of interrupt vectors
 * available and the configured queue count.  Returns 0 on success or a
 * negative/command error code; on failure the caller is expected to
 * unwind via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		/* Per-EQ adaptive interrupt coalescing state */
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		/* Issue the FW command to actually create the EQ */
		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
1984
Sathya Perla5fb379e2009-06-18 00:02:59 +00001985static void be_mcc_queues_destroy(struct be_adapter *adapter)
1986{
1987 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001988
Sathya Perla8788fdc2009-07-27 22:52:03 +00001989 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001990 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001991 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001992 be_queue_free(adapter, q);
1993
Sathya Perla8788fdc2009-07-27 22:52:03 +00001994 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001995 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001996 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001997 be_queue_free(adapter, q);
1998}
1999
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* CQ must exist before the MCCQ that posts completions to it */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in strict reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2032
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002033static void be_tx_queues_destroy(struct be_adapter *adapter)
2034{
2035 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002036 struct be_tx_obj *txo;
2037 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002038
Sathya Perla3c8def92011-06-12 20:01:58 +00002039 for_all_tx_queues(adapter, txo, i) {
2040 q = &txo->q;
2041 if (q->created)
2042 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2043 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002044
Sathya Perla3c8def92011-06-12 20:01:58 +00002045 q = &txo->cq;
2046 if (q->created)
2047 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2048 be_queue_free(adapter, q);
2049 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002050}
2051
/* Allocate and create all TX queues and their completion queues.
 *
 * For each TXQ: alloc CQ memory, create the CQ bound to an EQ (EQs may
 * be shared when there are fewer EQs than TXQs), alloc the TXQ memory,
 * then issue the FW command to create the TXQ.  Returns 0 on success;
 * on error the caller unwinds via be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2089
2090static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002091{
2092 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002093 struct be_rx_obj *rxo;
2094 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002095
Sathya Perla3abcded2010-10-03 22:12:27 -07002096 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002097 q = &rxo->cq;
2098 if (q->created)
2099 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2100 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002101 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002102}
2103
/* Allocate and create RX completion queues.
 *
 * One RSS ring is created per EQ; when at least 2 RSS rings are
 * possible, an extra default RXQ is added for non-IP traffic, so
 * num_rx_qs = num_evt_qs + 1 in the RSS case.  Each CQ is bound to an
 * EQ round-robin.  Returns 0 or the first failing command's status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* big_page_size: unit in which RX buffers are carved from a page */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2139
/* Legacy INTx interrupt handler (EQ0 only; see be_irq_register()).
 * Counts pending EQ events, schedules NAPI, and notifies the hardware.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the counted events without re-arming the EQ (NAPI will
	 * re-arm it when polling completes).
	 */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2171
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002172static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002173{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002174 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175
Sathya Perla0b545a62012-11-23 00:27:18 +00002176 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2177 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002178 return IRQ_HANDLED;
2179}
2180
Sathya Perla2e588f82011-03-11 02:49:26 +00002181static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182{
Somnath Koture38b1702013-05-29 22:55:56 +00002183 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002184}
2185
/* Poll one RX completion queue for up to @budget completions.
 *
 * Flush completions (num_rcvd == 0) and partial-DMA/mis-filtered frames
 * are discarded; the rest are delivered via GRO or the regular path.
 * The CQ is notified (re-armed) and the RXQ replenished when buffers run
 * low.  Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Re-arm the CQ (true) crediting the processed entries */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Top up the RX ring when posted buffers fall below the
		 * refill watermark.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2235
/* Reap up to @budget TX completions from one TX queue.
 *
 * Frees the wrbs/skbs for each completion, notifies (re-arms) the CQ,
 * and wakes the netdev subqueue @idx if it was stopped and the ring has
 * drained below half.  Returns true when fewer than @budget completions
 * were found, i.e. the queue is fully drained.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* tx_compl stat is read under a u64 seqcount elsewhere */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002268
/* NAPI poll handler for one event queue.
 *
 * Services every TXQ and RXQ mapped to this EQ (queues are distributed
 * round-robin across EQs), plus MCC completions on the EQ that owns the
 * MCC CQ.  Completes NAPI and re-arms the EQ only when all work fit in
 * the budget; otherwise stays in polling mode, clearing the counted
 * events without re-arming.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	/* Count events now; they are acked in the be_eq_notify below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			/* TX not drained: force "budget exhausted" so NAPI
			 * keeps polling.
			 */
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ (true) and ack the counted events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2307
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002308void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002309{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002310 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2311 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002312 u32 i;
2313
Sathya Perlad23e9462012-12-17 19:38:51 +00002314 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002315 return;
2316
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002317 if (lancer_chip(adapter)) {
2318 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2319 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2320 sliport_err1 = ioread32(adapter->db +
2321 SLIPORT_ERROR1_OFFSET);
2322 sliport_err2 = ioread32(adapter->db +
2323 SLIPORT_ERROR2_OFFSET);
2324 }
2325 } else {
2326 pci_read_config_dword(adapter->pdev,
2327 PCICFG_UE_STATUS_LOW, &ue_lo);
2328 pci_read_config_dword(adapter->pdev,
2329 PCICFG_UE_STATUS_HIGH, &ue_hi);
2330 pci_read_config_dword(adapter->pdev,
2331 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2332 pci_read_config_dword(adapter->pdev,
2333 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002334
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002335 ue_lo = (ue_lo & ~ue_lo_mask);
2336 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002337 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002338
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002339 /* On certain platforms BE hardware can indicate spurious UEs.
2340 * Allow the h/w to stop working completely in case of a real UE.
2341 * Hence not setting the hw_error for UE detection.
2342 */
2343 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002344 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002345 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002346 "Error detected in the card\n");
2347 }
2348
2349 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2350 dev_err(&adapter->pdev->dev,
2351 "ERR: sliport status 0x%x\n", sliport_status);
2352 dev_err(&adapter->pdev->dev,
2353 "ERR: sliport error1 0x%x\n", sliport_err1);
2354 dev_err(&adapter->pdev->dev,
2355 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002356 }
2357
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002358 if (ue_lo) {
2359 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2360 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002361 dev_err(&adapter->pdev->dev,
2362 "UE: %s bit set\n", ue_status_low_desc[i]);
2363 }
2364 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002365
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002366 if (ue_hi) {
2367 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2368 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002369 dev_err(&adapter->pdev->dev,
2370 "UE: %s bit set\n", ue_status_hi_desc[i]);
2371 }
2372 }
2373
2374}
2375
Sathya Perla8d56ff12009-11-22 22:02:26 +00002376static void be_msix_disable(struct be_adapter *adapter)
2377{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002378 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002379 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002380 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302381 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002382 }
2383}
2384
/* Negotiate and enable MSI-x vectors.
 *
 * Requests enough vectors for NIC use (and RoCE when supported); if the
 * full request fails but pci_enable_msix() reports a smaller feasible
 * count >= MIN_MSIX_VECTORS, retries with that count.  When RoCE is in
 * play, half the granted vectors are reserved for it.  Returns 0 on
 * success or when INTx fallback is possible (PF only); VFs must have
 * MSI-x, so the error is propagated for them.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* A positive status is the number of vectors that could be
		 * allocated; retry with exactly that many.
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Split the granted vectors between RoCE and NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2433
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002434static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002435 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002436{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302437 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002438}
2439
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 *
 * On failure, frees the IRQs already requested (walking back from the
 * failing index), logs a warning, disables MSI-x, and returns the
 * request_irq() error so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the vectors successfully requested so far */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2463
/* Register interrupt handlers: MSI-x when enabled, else legacy INTx.
 *
 * If MSI-x registration fails, the PF falls back to INTx; a VF cannot
 * use INTx, so the failure is returned as-is.  Sets isr_registered on
 * success so be_irq_unregister() knows there is something to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2491
2492static void be_irq_unregister(struct be_adapter *adapter)
2493{
2494 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002495 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002496 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002497
2498 if (!adapter->isr_registered)
2499 return;
2500
2501 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002502 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002503 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002504 goto done;
2505 }
2506
2507 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002508 for_all_evt_queues(adapter, eqo, i)
2509 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002510
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002511done:
2512 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002513}
2514
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002515static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002516{
2517 struct be_queue_info *q;
2518 struct be_rx_obj *rxo;
2519 int i;
2520
2521 for_all_rx_queues(adapter, rxo, i) {
2522 q = &rxo->q;
2523 if (q->created) {
2524 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002525 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002526 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002527 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002528 }
2529}
2530
/* ndo_stop handler: quiesce and tear down the data path.
 *
 * Ordering matters: RoCE first, then NAPI off, async MCC off, TX path
 * disabled and drained, RX queues destroyed, IRQs synchronized and EQs
 * drained, and finally the IRQ handlers unregistered.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no in-flight interrupt handler still touches
		 * this EQ before draining it.
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2567
/* Allocate and create the RX data queues and configure RSS.
 *
 * The FW requires the default (non-RSS) RXQ to be created first; the
 * RSS rings follow.  When multiple RX queues exist, a 128-entry RSS
 * indirection table is built by striping the RSS ring ids, and the RSS
 * hash flags are programmed (UDP hashing only on non-BEx chips).
 * Finally each ring is posted with an initial set of buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Stripe the RSS ring ids across all 128 table slots */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS hashing is not available on BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2624
/* ndo_open handler: bring the interface up.
 * Creates RX queues, registers IRQs, arms all RX/TX CQs and EQs, enables
 * NAPI and the async MCC path, then starts the TX queues.  On any failure
 * the partial bring-up is undone via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm RX and TX completion queues so completions start flowing */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI and arm the event queues; the flag lets be_close()
	 * know NAPI must be disabled again
	 */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state; ignore a query failure */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2667
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002668static int be_setup_wol(struct be_adapter *adapter, bool enable)
2669{
2670 struct be_dma_mem cmd;
2671 int status = 0;
2672 u8 mac[ETH_ALEN];
2673
2674 memset(mac, 0, ETH_ALEN);
2675
2676 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002677 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2678 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002679 if (cmd.va == NULL)
2680 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002681
2682 if (enable) {
2683 status = pci_write_config_dword(adapter->pdev,
2684 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2685 if (status) {
2686 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002687 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002688 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2689 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002690 return status;
2691 }
2692 status = be_cmd_enable_magic_wol(adapter,
2693 adapter->netdev->dev_addr, &cmd);
2694 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2695 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2696 } else {
2697 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2698 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2699 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2700 }
2701
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002702 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002703 return status;
2704}
2705
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last FW cmd; a per-VF failure is logged but
 * does not abort the loop.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips program the MAC as a pmac entry; other chips
		 * use the SET_MAC cmd
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address in sequence */
		mac[5] += 1;
	}
	return status;
}
2740
/* For VFs that were already provisioned (e.g. after a PF driver reload),
 * read back each VF's MAC from FW and cache it in vf_cfg->mac_addr.
 * Returns 0 or the first failing MAC_ADDR_QUERY status.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): the return status of this call and the
		 * 'active' flag are never checked; only the pmac_id
		 * side-effect is consumed and 'mac' is overwritten by the
		 * query below — confirm this is intentional.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2760
/* Undo be_vf_setup(): disable SR-IOV and destroy the per-VF FW objects
 * (MAC entry and interface).  If any VF is still assigned to a guest VM,
 * the FW objects are left intact and only the local vf_cfg state is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Mirror of the MAC programming split in
		 * be_vf_eth_addr_config(): pmac entry on BEx, SET_MAC
		 * (with a NULL mac to clear) on other chips
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2788
/* Destroy all driver queues in reverse order of creation
 * (see be_setup_queues()): MCC first, then RX CQs, TX queues and
 * finally the event queues they all feed into.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2796
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302797static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002798{
Sathya Perla191eb752012-02-23 18:50:13 +00002799 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2800 cancel_delayed_work_sync(&adapter->work);
2801 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2802 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302803}
2804
/* Tear down everything be_setup() created: the periodic worker, the VFs,
 * the programmed MAC filters, the FW interface, all queues and the MSI-X
 * vectors.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2830
/* Create a FW interface (if_handle) for each VF.
 * On non-BE3 chips the capability flags are taken from the per-VF FW
 * profile when one exists; otherwise the static
 * UNTAGGED|BROADCAST|MULTICAST set is used.  The enabled flags are the
 * subset of cap_flags that the PF turns on for the VF.
 * Returns 0 or the first failing cmd status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
2860
/* Allocate the per-VF config array and mark every entry as "not yet
 * created in FW" by setting if_handle/pmac_id to -1.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2877
/* Provision SR-IOV VFs.
 * If VFs are already enabled in PCI (e.g. PF driver reload while VFs are
 * assigned), the existing VF count is adopted and the per-VF interface
 * ids/MACs are queried from FW instead of being created.  Otherwise new
 * FW interfaces and MACs are created for module-param 'num_vfs' VFs
 * (capped at the HW limit) and SR-IOV is enabled in PCI last.
 * On any failure everything is undone via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Re-use existing FW interfaces when VFs pre-exist, else create */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise for the VF MAC addresses: query vs. assign */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	/* Per-VF post-configuration */
	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Cache the link speed as the VF's default TX rate */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV in PCI only after all FW-side setup succeeded */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2975
/* On BE2/BE3 FW does not suggest the supported limits, so fill *res with
 * driver-known per-chip defaults, scaled down when SR-IOV will be used,
 * when the function is a VF, or in multi-channel (UMC/FLEX10) modes.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* FLEX10 splits the VLAN table across channels */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS set (the default RXQ) */
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3021
Sathya Perla30128032011-11-10 19:17:57 +00003022static void be_setup_init(struct be_adapter *adapter)
3023{
3024 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003025 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003026 adapter->if_handle = -1;
3027 adapter->be3_native = false;
3028 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003029 if (be_physfn(adapter))
3030 adapter->cmd_privileges = MAX_PRIVILEGES;
3031 else
3032 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003033}
3034
/* Populate adapter->res with the per-function resource limits:
 * driver-computed defaults on BE2/BE3 (BEx_get_resources()), or
 * FW-reported limits on newer chips.  Returns 0 or a cmd status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the PF can see/own the VF pool limit */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3078
/* Routine to query per function resource limits.
 * Also allocates the pmac_id table sized to the uc-mac limit (+1 for the
 * primary mac) and clamps the configured queue count to the HW limits.
 * Returns 0, a cmd status, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3106
/* Program the primary MAC address.
 * If netdev has no address yet (first probe), query the permanent MAC
 * from FW and adopt it; otherwise (e.g. after a reset) re-program the
 * address already held by netdev.  Returns 0 or the query status.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* On BE3 VFs this cmd may fail due to lack of privilege.
	 * Ignore the failure as in this case pmac_id is fetched
	 * in the IFACE_CREATE cmd.
	 */
	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id[0], 0);
	return 0;
}
3132
/* Start the periodic (1s) worker and record it in adapter->flags so
 * be_cancel_worker() knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3138
/* Create all driver queues in dependency order (EQs first, then TX, RX
 * CQs and MCC) and publish the real queue counts to the net stack.
 * Returns 0 or the first failing status; the caller is responsible for
 * cleanup (be_setup() -> be_clear()).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Tell the stack how many RX/TX queues are actually in use;
	 * callers hold rtnl_lock for these
	 */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3173
/* Re-create all queues (e.g. after the desired queue count changed):
 * quiesce the interface, tear the queues down, re-program MSI-X when
 * allowed, rebuild the queues and bring the interface back up.
 * Returns 0 or the first failing status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3209
/* One-time (per probe/reset) adapter setup: query FW config/limits,
 * enable MSI-X, create the FW interface and all queues, program the MAC,
 * VLANs, RX mode and flow control, provision VFs when requested, and
 * start the periodic worker.  On any failure everything done so far is
 * undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags both wanted and supported by this iface */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Re-program VLAN filters that survived a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Sync FW flow-control settings with the driver's */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failures are logged inside be_vf_setup(), not fatal */
	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3287
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler: with normal interrupts unavailable
 * (netconsole/kgdb), re-arm every EQ and kick its NAPI context so RX/TX
 * completions are still processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant trailing "return;" removed (checkpatch warning) */
}
#endif
3303
/* UFI firmware image header signature, and the flash directory cookie;
 * the cookie is stored as two 16-byte chunks and compared with memcmp
 * against the on-flash copy in get_fsec_info().
 */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003307static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003308 const u8 *p, u32 img_start, int image_size,
3309 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003310{
3311 u32 crc_offset;
3312 u8 flashed_crc[4];
3313 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003314
3315 crc_offset = hdr_size + img_start + image_size - 4;
3316
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003317 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003318
3319 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003320 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003321 if (status) {
3322 dev_err(&adapter->pdev->dev,
3323 "could not get crc from flash, not flashing redboot\n");
3324 return false;
3325 }
3326
3327 /*update redboot only if crc does not match*/
3328 if (!memcmp(flashed_crc, p, 4))
3329 return false;
3330 else
3331 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003332}
3333
Sathya Perla306f1342011-08-02 19:57:45 +00003334static bool phy_flashing_required(struct be_adapter *adapter)
3335{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003336 return (adapter->phy.phy_type == TN_8022 &&
3337 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003338}
3339
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003340static bool is_comp_in_ufi(struct be_adapter *adapter,
3341 struct flash_section_info *fsec, int type)
3342{
3343 int i = 0, img_type = 0;
3344 struct flash_section_info_g2 *fsec_g2 = NULL;
3345
Sathya Perlaca34fe32012-11-06 17:48:56 +00003346 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003347 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3348
3349 for (i = 0; i < MAX_FLASH_COMP; i++) {
3350 if (fsec_g2)
3351 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3352 else
3353 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3354
3355 if (img_type == type)
3356 return true;
3357 }
3358 return false;
3359
3360}
3361
Jingoo Han4188e7d2013-08-05 18:02:02 +09003362static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003363 int header_size,
3364 const struct firmware *fw)
3365{
3366 struct flash_section_info *fsec = NULL;
3367 const u8 *p = fw->data;
3368
3369 p += header_size;
3370 while (p < (fw->data + fw->size)) {
3371 fsec = (struct flash_section_info *)p;
3372 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3373 return fsec;
3374 p += 32;
3375 }
3376 return NULL;
3377}
3378
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003379static int be_flash(struct be_adapter *adapter, const u8 *img,
3380 struct be_dma_mem *flash_cmd, int optype, int img_size)
3381{
3382 u32 total_bytes = 0, flash_op, num_bytes = 0;
3383 int status = 0;
3384 struct be_cmd_write_flashrom *req = flash_cmd->va;
3385
3386 total_bytes = img_size;
3387 while (total_bytes) {
3388 num_bytes = min_t(u32, 32*1024, total_bytes);
3389
3390 total_bytes -= num_bytes;
3391
3392 if (!total_bytes) {
3393 if (optype == OPTYPE_PHY_FW)
3394 flash_op = FLASHROM_OPER_PHY_FLASH;
3395 else
3396 flash_op = FLASHROM_OPER_FLASH;
3397 } else {
3398 if (optype == OPTYPE_PHY_FW)
3399 flash_op = FLASHROM_OPER_PHY_SAVE;
3400 else
3401 flash_op = FLASHROM_OPER_SAVE;
3402 }
3403
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003404 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003405 img += num_bytes;
3406 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3407 flash_op, num_bytes);
3408 if (status) {
3409 if (status == ILLEGAL_IOCTL_REQ &&
3410 optype == OPTYPE_PHY_FW)
3411 break;
3412 dev_err(&adapter->pdev->dev,
3413 "cmd to write to flash rom failed.\n");
3414 return status;
3415 }
3416 }
3417 return 0;
3418}
3419
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Per-component flash layout for gen3 (BE3) ASICs:
	 * { flash offset, op type, max image size, UFI image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Equivalent layout for gen2 (BE2) ASICs; gen2 has no NCSI or
	 * PHY FW sections.
	 */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* Pick the layout table and file-header size for this ASIC gen */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components this UFI file does not carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs running FW >= 3.102.148.0 to be flashed */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		/* PHY FW applies only to boards with an external PHY */
		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Boot code is flashed only when its CRC changed */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		/* Bounds-check the component against the file size */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3529
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003530static int be_flash_skyhawk(struct be_adapter *adapter,
3531 const struct firmware *fw,
3532 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003533{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003534 int status = 0, i, filehdr_size = 0;
3535 int img_offset, img_size, img_optype, redboot;
3536 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3537 const u8 *p = fw->data;
3538 struct flash_section_info *fsec = NULL;
3539
3540 filehdr_size = sizeof(struct flash_file_hdr_g3);
3541 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3542 if (!fsec) {
3543 dev_err(&adapter->pdev->dev,
3544 "Invalid Cookie. UFI corrupted ?\n");
3545 return -1;
3546 }
3547
3548 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3549 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3550 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3551
3552 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3553 case IMAGE_FIRMWARE_iSCSI:
3554 img_optype = OPTYPE_ISCSI_ACTIVE;
3555 break;
3556 case IMAGE_BOOT_CODE:
3557 img_optype = OPTYPE_REDBOOT;
3558 break;
3559 case IMAGE_OPTION_ROM_ISCSI:
3560 img_optype = OPTYPE_BIOS;
3561 break;
3562 case IMAGE_OPTION_ROM_PXE:
3563 img_optype = OPTYPE_PXE_BIOS;
3564 break;
3565 case IMAGE_OPTION_ROM_FCoE:
3566 img_optype = OPTYPE_FCOE_BIOS;
3567 break;
3568 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3569 img_optype = OPTYPE_ISCSI_BACKUP;
3570 break;
3571 case IMAGE_NCSI:
3572 img_optype = OPTYPE_NCSI_FW;
3573 break;
3574 default:
3575 continue;
3576 }
3577
3578 if (img_optype == OPTYPE_REDBOOT) {
3579 redboot = be_flash_redboot(adapter, fw->data,
3580 img_offset, img_size,
3581 filehdr_size + img_hdrs_size);
3582 if (!redboot)
3583 continue;
3584 }
3585
3586 p = fw->data;
3587 p += filehdr_size + img_offset + img_hdrs_size;
3588 if (p + img_size > fw->data + fw->size)
3589 return -1;
3590
3591 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3592 if (status) {
3593 dev_err(&adapter->pdev->dev,
3594 "Flashing section type %d failed.\n",
3595 fsec->fsec_entry[i].type);
3596 return status;
3597 }
3598 }
3599 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003600}
3601
/* Download a firmware image to a Lancer chip: stream the image to the
 * FW in 32KB chunks via the write_object cmd, commit it, and trigger a
 * FW reset if the new image requires one to become active.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write_object cmd moves whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image down chunk by chunk; advance by the number of
	 * bytes the FW reports as actually written.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* The FW tells us whether the new image needs a FW reset or a
	 * full system reboot to become active.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3697
Sathya Perlaca34fe32012-11-06 17:48:56 +00003698#define UFI_TYPE2 2
3699#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003700#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003701#define UFI_TYPE4 4
3702static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003703 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003704{
3705 if (fhdr == NULL)
3706 goto be_get_ufi_exit;
3707
Sathya Perlaca34fe32012-11-06 17:48:56 +00003708 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3709 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003710 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3711 if (fhdr->asic_type_rev == 0x10)
3712 return UFI_TYPE3R;
3713 else
3714 return UFI_TYPE3;
3715 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003716 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003717
3718be_get_ufi_exit:
3719 dev_err(&adapter->pdev->dev,
3720 "UFI and Interface are not compatible for flashing\n");
3721 return -1;
3722}
3723
/* Download firmware to BE2/BE3/BE3-R/Skyhawk chips: classify the UFI
 * file, then dispatch to the generation-specific flashing routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* Shared DMA buffer for all write_flashrom cmds issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* May return -1 on a mismatched image; handled after the loop */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Gen3/gen4 UFIs carry per-image headers; flash each image whose
	 * imageid matches this function (id 1).
	 */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 (BE2) UFIs have no per-image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3792
3793int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3794{
3795 const struct firmware *fw;
3796 int status;
3797
3798 if (!netif_running(adapter->netdev)) {
3799 dev_err(&adapter->pdev->dev,
3800 "Firmware load not allowed (interface is down)\n");
3801 return -1;
3802 }
3803
3804 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3805 if (status)
3806 goto fw_exit;
3807
3808 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3809
3810 if (lancer_chip(adapter))
3811 status = lancer_fw_download(adapter, fw);
3812 else
3813 status = be_fw_download(adapter, fw);
3814
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003815 if (!status)
3816 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3817 adapter->fw_on_flash);
3818
Ajit Khaparde84517482009-09-04 03:12:16 +00003819fw_exit:
3820 release_firmware(fw);
3821 return status;
3822}
3823
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003824static int be_ndo_bridge_setlink(struct net_device *dev,
3825 struct nlmsghdr *nlh)
3826{
3827 struct be_adapter *adapter = netdev_priv(dev);
3828 struct nlattr *attr, *br_spec;
3829 int rem;
3830 int status = 0;
3831 u16 mode = 0;
3832
3833 if (!sriov_enabled(adapter))
3834 return -EOPNOTSUPP;
3835
3836 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3837
3838 nla_for_each_nested(attr, br_spec, rem) {
3839 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3840 continue;
3841
3842 mode = nla_get_u16(attr);
3843 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3844 return -EINVAL;
3845
3846 status = be_cmd_set_hsw_config(adapter, 0, 0,
3847 adapter->if_handle,
3848 mode == BRIDGE_MODE_VEPA ?
3849 PORT_FWD_TYPE_VEPA :
3850 PORT_FWD_TYPE_VEB);
3851 if (status)
3852 goto err;
3853
3854 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3855 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3856
3857 return status;
3858 }
3859err:
3860 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3861 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3862
3863 return status;
3864}
3865
3866static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3867 struct net_device *dev,
3868 u32 filter_mask)
3869{
3870 struct be_adapter *adapter = netdev_priv(dev);
3871 int status = 0;
3872 u8 hsw_mode;
3873
3874 if (!sriov_enabled(adapter))
3875 return 0;
3876
3877 /* BE and Lancer chips support VEB mode only */
3878 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3879 hsw_mode = PORT_FWD_TYPE_VEB;
3880 } else {
3881 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3882 adapter->if_handle, &hsw_mode);
3883 if (status)
3884 return 0;
3885 }
3886
3887 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3888 hsw_mode == PORT_FWD_TYPE_VEPA ?
3889 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3890}
3891
/* netdev entry points exported by the be2net driver */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV: per-VF MAC/VLAN/rate configuration from the PF */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	/* e-switch VEB/VEPA mode get/set */
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
};
3913
/* One-time net_device setup: advertise offload features, wire up the
 * netdev and ethtool ops.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Features user-space may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-enabled features; VLAN RX strip/filter are always on */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW filters unicast addrs, so no promisc fallback on uc overflow */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
3940
3941static void be_unmap_pci_bars(struct be_adapter *adapter)
3942{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003943 if (adapter->csr)
3944 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003945 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003946 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003947}
3948
/* Which PCI BAR holds the doorbell registers: BAR 0 on Lancer and on
 * VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3956
3957static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003958{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003959 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003960 adapter->roce_db.size = 4096;
3961 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3962 db_bar(adapter));
3963 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3964 db_bar(adapter));
3965 }
Parav Pandit045508a2012-03-26 14:27:13 +00003966 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003967}
3968
3969static int be_map_pci_bars(struct be_adapter *adapter)
3970{
3971 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003972 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003973
Sathya Perlace66f782012-11-06 17:48:58 +00003974 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3975 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3976 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003977
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003978 if (BEx_chip(adapter) && be_physfn(adapter)) {
3979 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3980 if (adapter->csr == NULL)
3981 return -ENOMEM;
3982 }
3983
Sathya Perlace66f782012-11-06 17:48:58 +00003984 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003985 if (addr == NULL)
3986 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003987 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003988
3989 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003990 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003991
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003992pci_map_err:
3993 be_unmap_pci_bars(adapter);
3994 return -ENOMEM;
3995}
3996
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003997static void be_ctrl_cleanup(struct be_adapter *adapter)
3998{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003999 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004000
4001 be_unmap_pci_bars(adapter);
4002
4003 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004004 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4005 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004006
Sathya Perla5b8821b2011-08-02 19:57:44 +00004007 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004008 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004009 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4010 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004011}
4012
/* Initialize the control path: map PCI BARs, allocate the FW mailbox
 * and rx-filter DMA buffers, and set up the MCC locks. Uses goto-based
 * unwinding so each failure releases exactly what was acquired.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode SLI family and PF/VF flag from the interface register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox used by FW cmds can
	 * be aligned to a 16-byte boundary.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4071
4072static void be_stats_cleanup(struct be_adapter *adapter)
4073{
Sathya Perla3abcded2010-10-03 22:12:27 -07004074 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004075
4076 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004077 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4078 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004079}
4080
4081static int be_stats_init(struct be_adapter *adapter)
4082{
Sathya Perla3abcded2010-10-03 22:12:27 -07004083 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004084
Sathya Perlaca34fe32012-11-06 17:48:56 +00004085 if (lancer_chip(adapter))
4086 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4087 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004088 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004089 else
4090 /* BE3 and Skyhawk */
4091 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4092
Joe Perchesede23fa82013-08-26 22:45:23 -07004093 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4094 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004095 if (cmd->va == NULL)
4096 return -1;
4097 return 0;
4098}
4099
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The ordering below matters: RoCE and interrupts go first,
 * recovery work is cancelled before the netdev is unregistered, and the
 * FW is told we are done (be_cmd_fw_clean) before control structures are
 * freed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the Lancer error-recovery worker before unregistering */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees adapter too (netdev_priv storage) */
	free_netdev(adapter->netdev);
}
4131
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004132bool be_is_wol_supported(struct be_adapter *adapter)
4133{
4134 return ((adapter->wol_cap & BE_WOL_CAP) &&
4135 !be_is_wol_excluded(adapter)) ? true : false;
4136}
4137
/* Query the FW's extended-FAT capabilities and return the UART trace
 * (debug log) level of module 0. Returns 0 on Lancer chips, on allocation
 * failure, or when the FW query fails.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* Lancer does not support this FAT capabilities query */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the common response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* pick the dbg level of the UART trace mode entry (if more
		 * than one matches, the last one wins)
		 */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004174
/* Gather one-time configuration from the FW after init: controller
 * attributes, WOL capability, FW log level and the default queue count.
 * Returns 0 on success or the error from be_cmd_get_cntl_attributes().
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable HW-level driver messages only when the FW UART log level
	 * is at or below the default
	 */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4204
/* Attempt to recover a Lancer chip after a HW error: wait for the chip
 * to report ready, tear the function down, clear error state and set it
 * up again (re-opening the netdev if it was running).
 * Returns 0 on success; -EAGAIN means FW resource provisioning is still
 * in progress and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* must clear sticky error flags before re-running be_setup() */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}
4241
/* Periodic (1s) worker that polls for HW errors and, on Lancer chips,
 * detaches the netdev and runs the recovery sequence. Reschedules itself
 * unless recovery failed with an error other than -EAGAIN.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device
		 * while we recover
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4268
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes FW stats, reads die temperature on the PF,
 * replenishes starved RX rings and updates EQ delay (interrupt
 * moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects bottom halves disabled */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't fire a new stats cmd while the previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature query is PF-only and rate-limited to every
	 * be_get_temp_freq ticks
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* repost RX buffers to rings that ran dry */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4310
Sathya Perla257a3fe2013-06-14 15:54:51 +05304311/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004312static bool be_reset_required(struct be_adapter *adapter)
4313{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304314 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004315}
4316
Sathya Perlad3791422012-09-28 04:39:44 +00004317static char *mc_name(struct be_adapter *adapter)
4318{
4319 if (adapter->function_mode & FLEX10_MODE)
4320 return "FLEX10";
4321 else if (adapter->function_mode & VNIC_MODE)
4322 return "vNIC";
4323 else if (adapter->function_mode & UMC_ENABLED)
4324 return "UMC";
4325 else
4326 return "";
4327}
4328
/* "PF" for a physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4333
/* PCI probe callback: bring up one adapter function.
 * Sequence: enable PCI device and BARs -> allocate netdev -> set DMA
 * masks -> map control structures -> sync with FW / optional FLR ->
 * FW init -> stats buffer -> initial config -> be_setup() -> register
 * netdev. Failures unwind through the goto ladder in reverse order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:	/* NOTE: label shadows the free_netdev() kernel helper */
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4462
/* Legacy PM suspend callback: arm WOL if enabled, stop the recovery
 * worker, close and tear down the interface, then power the PCI device
 * down to the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4486
/* Legacy PM resume callback: re-enable the PCI device, wait for FW,
 * re-run FW init and be_setup(), reopen the interface if it was running,
 * restart the recovery worker and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are ignored
	 * here, unlike in be_probe()/be_eeh_resume()
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4527
/*
 * Shutdown callback (reboot/poweroff).
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4547
/* AER/EEH error_detected callback: on the first report, flag the error,
 * stop the recovery worker, quiesce and tear down the interface; then
 * tell the PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* do the teardown only once per error episode */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4586
/* AER/EEH slot_reset callback: re-enable the PCI device after the slot
 * reset, wait for the FW to become ready and clear the driver's error
 * flags. Returns RECOVERED so the core proceeds to be_eeh_resume(), or
 * DISCONNECT if the device/FW cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4613
/* AER/EEH resume callback: final recovery step after a successful slot
 * reset — FLR the function, re-run FW init and be_setup(), reopen the
 * interface if it was running and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4650
/* PCI error (AER/EEH) recovery callbacks, wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4656
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * the EEH error handlers defined in this file
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4667
4668static int __init be_init_module(void)
4669{
Joe Perches8e95a202009-12-03 07:58:21 +00004670 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4671 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004672 printk(KERN_WARNING DRV_NAME
4673 " : Module param rx_frag_size must be 2048/4096/8192."
4674 " Using 2048\n");
4675 rx_frag_size = 2048;
4676 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004677
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004678 return pci_register_driver(&be_driver);
4679}
4680module_init(be_init_module);
4681
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device)
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);