blob: 2962d2ff9f1c57afd5a231f6a621d36a8e4b9159 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070025
26MODULE_VERSION(DRV_VER);
27MODULE_DEVICE_TABLE(pci, be_dev_ids);
28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000029MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030MODULE_LICENSE("GPL");
31
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla11ac75e2011-12-13 00:58:50 +000036static ushort rx_frag_size = 2048;
37module_param(rx_frag_size, ushort, S_IRUGO);
38MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
Sathya Perla6b7c5b92009-03-11 23:32:03 -070040static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070049 { 0 }
50};
51MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Bit-position -> block-name strings for the UE Status Low CSR.
 * Trailing spaces in some entries are part of the original strings and
 * are preserved byte-for-byte.
 */
static const char * const ue_status_low_desc[] = {
	"CEV", "CTX", "DBUF", "ERX",
	"Host", "MPU", "NDMA", "PTC ",
	"RDMA ", "RXF ", "RXIPS ", "RXULP0 ",
	"RXULP1 ", "RXULP2 ", "TIM ", "TPOST ",
	"TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ",
	"UC ", "WDMA ", "TXULP2 ", "HOST1 ",
	"P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ",
	"AXGMAC0", "AXGMAC1", "JTAG", "MPU_INTPEND"
};
/* Bit-position -> block-name strings for the UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM",
	"PCS1ONLINE", "PCTL0", "PCTL1", "PMEM",
	"RR", "TXPB", "RXPP", "XAUI",
	"TXP", "ARM", "IPC", "HOST2",
	"HOST3", "HOST4", "HOST5", "HOST6",
	"HOST7", "HOST8", "HOST9", "NETC",
	"Unknown", "Unknown", "Unknown", "Unknown",
	"Unknown", "Unknown", "Unknown", "Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700122
Sathya Perla752961a2011-10-24 02:45:03 +0000123/* Is BE in a multi-channel mode */
124static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128}
129
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000136 mem->va = NULL;
137 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138}
139
140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142{
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000152 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153 return 0;
154}
155
Somnath Kotur68c45a22013-03-14 02:42:07 +0000156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Sathya Perladb3ea782011-08-22 19:41:52 +0000160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173}
174
Somnath Kotur68c45a22013-03-14 02:42:07 +0000175static void be_intr_set(struct be_adapter *adapter, bool enable)
176{
177 int status = 0;
178
179 /* On lancer interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189}
190
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192{
193 u32 val = 0;
194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000207
208 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210}
211
Sathya Perla8788fdc2009-07-27 22:52:03 +0000212static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 bool arm, bool clear_int, u16 num_popped)
214{
215 u32 val = 0;
216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
218 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000219
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000220 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000221 return;
222
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700223 if (arm)
224 val |= 1 << DB_EQ_REARM_SHIFT;
225 if (clear_int)
226 val |= 1 << DB_EQ_CLR_SHIFT;
227 val |= 1 << DB_EQ_EVNT_SHIFT;
228 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233{
234 u32 val = 0;
235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Sathya Perla5a712c12013-07-23 15:24:59 +0530260 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
261 * privilege or if PF did not provision the new MAC address.
262 * On BE3, this cmd will always fail if the VF doesn't have the
263 * FILTMGMT privilege. This failure is OK, only if the PF programmed
264 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000265 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
267 adapter->if_handle, &adapter->pmac_id[0], 0);
268 if (!status) {
269 curr_pmac_id = adapter->pmac_id[0];
270
271 /* Delete the old programmed MAC. This call may fail if the
272 * old MAC was already deleted by the PF driver.
273 */
274 if (adapter->pmac_id[0] != old_pmac_id)
275 be_cmd_pmac_del(adapter, adapter->if_handle,
276 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000277 }
278
Sathya Perla5a712c12013-07-23 15:24:59 +0530279 /* Decide if the new MAC is successfully activated only after
280 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000281 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530282 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000283 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000284 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700285
Sathya Perla5a712c12013-07-23 15:24:59 +0530286 /* The MAC change did not happen, either due to lack of privilege
287 * or PF didn't pre-provision.
288 */
289 if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
290 status = -EPERM;
291 goto err;
292 }
293
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530295 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000296 return 0;
297err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530298 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700299 return status;
300}
301
Sathya Perlaca34fe32012-11-06 17:48:56 +0000302/* BE2 supports only v0 cmd */
303static void *hw_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307
308 return &cmd->hw_stats;
309 } else {
310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311
312 return &cmd->hw_stats;
313 }
314}
315
316/* BE2 supports only v0 cmd */
317static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
318{
319 if (BE2_chip(adapter)) {
320 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
321
322 return &hw_stats->erx;
323 } else {
324 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
325
326 return &hw_stats->erx;
327 }
328}
329
330static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000331{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000332 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
333 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
334 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000335 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000336 &rxf_stats->port[adapter->port_num];
337 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000338
Sathya Perlaac124ff2011-07-25 19:10:14 +0000339 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000340 drvs->rx_pause_frames = port_stats->rx_pause_frames;
341 drvs->rx_crc_errors = port_stats->rx_crc_errors;
342 drvs->rx_control_frames = port_stats->rx_control_frames;
343 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
344 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
345 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
346 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
347 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
348 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
349 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
350 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
351 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
352 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
353 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_dropped_header_too_small =
356 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000357 drvs->rx_address_filtered =
358 port_stats->rx_address_filtered +
359 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360 drvs->rx_alignment_symbol_errors =
361 port_stats->rx_alignment_symbol_errors;
362
363 drvs->tx_pauseframes = port_stats->tx_pauseframes;
364 drvs->tx_controlframes = port_stats->tx_controlframes;
365
366 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000367 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000368 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000371 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000372 drvs->forwarded_packets = rxf_stats->forwarded_packets;
373 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000374 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
375 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000376 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
377}
378
Sathya Perlaca34fe32012-11-06 17:48:56 +0000379static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000381 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
382 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
383 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000385 &rxf_stats->port[adapter->port_num];
386 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000389 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
390 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 drvs->rx_pause_frames = port_stats->rx_pause_frames;
392 drvs->rx_crc_errors = port_stats->rx_crc_errors;
393 drvs->rx_control_frames = port_stats->rx_control_frames;
394 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
395 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
396 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
397 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
398 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
399 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
400 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
401 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
402 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
403 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
404 drvs->rx_dropped_header_too_small =
405 port_stats->rx_dropped_header_too_small;
406 drvs->rx_input_fifo_overflow_drop =
407 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000408 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409 drvs->rx_alignment_symbol_errors =
410 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000412 drvs->tx_pauseframes = port_stats->tx_pauseframes;
413 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000414 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000415 drvs->jabber_events = port_stats->jabber_events;
416 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000417 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000418 drvs->forwarded_packets = rxf_stats->forwarded_packets;
419 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000420 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
421 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000422 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
423}
424
Selvin Xavier005d5692011-05-16 07:36:35 +0000425static void populate_lancer_stats(struct be_adapter *adapter)
426{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427
Selvin Xavier005d5692011-05-16 07:36:35 +0000428 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000429 struct lancer_pport_stats *pport_stats =
430 pport_stats_from_cmd(adapter);
431
432 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
433 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
434 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
435 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000436 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000437 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000438 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
439 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
440 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
441 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
442 drvs->rx_dropped_tcp_length =
443 pport_stats->rx_dropped_invalid_tcp_length;
444 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
445 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
446 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
447 drvs->rx_dropped_header_too_small =
448 pport_stats->rx_dropped_header_too_small;
449 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000450 drvs->rx_address_filtered =
451 pport_stats->rx_address_filtered +
452 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000453 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000454 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000455 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
456 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000457 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000458 drvs->forwarded_packets = pport_stats->num_forwards_lo;
459 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000460 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000461 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000462}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000463
Sathya Perla09c1c682011-08-22 19:41:53 +0000464static void accumulate_16bit_val(u32 *acc, u16 val)
465{
466#define lo(x) (x & 0xFFFF)
467#define hi(x) (x & 0xFFFF0000)
468 bool wrapped = val < lo(*acc);
469 u32 newacc = hi(*acc) + val;
470
471 if (wrapped)
472 newacc += 65536;
473 ACCESS_ONCE(*acc) = newacc;
474}
475
Jingoo Han4188e7d2013-08-05 18:02:02 +0900476static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000477 struct be_rx_obj *rxo,
478 u32 erx_stat)
479{
480 if (!BEx_chip(adapter))
481 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
482 else
483 /* below erx HW counter can actually wrap around after
484 * 65535. Driver accumulates a 32-bit value
485 */
486 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
487 (u16)erx_stat);
488}
489
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000490void be_parse_stats(struct be_adapter *adapter)
491{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000492 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
493 struct be_rx_obj *rxo;
494 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000495 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000496
Sathya Perlaca34fe32012-11-06 17:48:56 +0000497 if (lancer_chip(adapter)) {
498 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000499 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000500 if (BE2_chip(adapter))
501 populate_be_v0_stats(adapter);
502 else
503 /* for BE3 and Skyhawk */
504 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000505
Sathya Perlaca34fe32012-11-06 17:48:56 +0000506 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
507 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000508 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
509 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000510 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000511 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000512}
513
Sathya Perlaab1594e2011-07-25 19:10:15 +0000514static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
515 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700516{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000517 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000518 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700519 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000520 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000521 u64 pkts, bytes;
522 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700523 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700524
Sathya Perla3abcded2010-10-03 22:12:27 -0700525 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000526 const struct be_rx_stats *rx_stats = rx_stats(rxo);
527 do {
528 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
529 pkts = rx_stats(rxo)->rx_pkts;
530 bytes = rx_stats(rxo)->rx_bytes;
531 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
532 stats->rx_packets += pkts;
533 stats->rx_bytes += bytes;
534 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
535 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
536 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700537 }
538
Sathya Perla3c8def92011-06-12 20:01:58 +0000539 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000540 const struct be_tx_stats *tx_stats = tx_stats(txo);
541 do {
542 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
543 pkts = tx_stats(txo)->tx_pkts;
544 bytes = tx_stats(txo)->tx_bytes;
545 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
546 stats->tx_packets += pkts;
547 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000548 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700549
550 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000551 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552 drvs->rx_alignment_symbol_errors +
553 drvs->rx_in_range_errors +
554 drvs->rx_out_range_errors +
555 drvs->rx_frame_too_long +
556 drvs->rx_dropped_too_small +
557 drvs->rx_dropped_too_short +
558 drvs->rx_dropped_header_too_small +
559 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000560 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000563 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000564 drvs->rx_out_range_errors +
565 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000566
Sathya Perlaab1594e2011-07-25 19:10:15 +0000567 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700568
569 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000570 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000571
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700572 /* receiver fifo overrun */
573 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000574 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000575 drvs->rx_input_fifo_overflow_drop +
576 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000577 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700578}
579
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000580void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700582 struct net_device *netdev = adapter->netdev;
583
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000584 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000585 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000586 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000588
589 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
590 netif_carrier_on(netdev);
591 else
592 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700593}
594
Sathya Perla3c8def92011-06-12 20:01:58 +0000595static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000596 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700597{
Sathya Perla3c8def92011-06-12 20:01:58 +0000598 struct be_tx_stats *stats = tx_stats(txo);
599
Sathya Perlaab1594e2011-07-25 19:10:15 +0000600 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000601 stats->tx_reqs++;
602 stats->tx_wrbs += wrb_cnt;
603 stats->tx_bytes += copied;
604 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700605 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000606 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000607 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700608}
609
610/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000611static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
612 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700614 int cnt = (skb->len > skb->data_len);
615
616 cnt += skb_shinfo(skb)->nr_frags;
617
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700618 /* to account for hdr wrb */
619 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000620 if (lancer_chip(adapter) || !(cnt & 1)) {
621 *dummy = false;
622 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700623 /* add a dummy to make it an even num */
624 cnt++;
625 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000626 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700627 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
628 return cnt;
629}
630
631static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
632{
633 wrb->frag_pa_hi = upper_32_bits(addr);
634 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
635 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000636 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700637}
638
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000639static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
640 struct sk_buff *skb)
641{
642 u8 vlan_prio;
643 u16 vlan_tag;
644
645 vlan_tag = vlan_tx_tag_get(skb);
646 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
647 /* If vlan priority provided by OS is NOT in available bmap */
648 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
649 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
650 adapter->recommended_prio;
651
652 return vlan_tag;
653}
654
/* Fill the per-pkt header WRB: offload flags (LSO/checksum), VLAN tag,
 * event/completion bits, total WRB count and payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is set for IPv6 TSO only on non-Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		/* priority bits in the tag may be rewritten per adapter cfg */
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
689
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000690static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000691 bool unmap_single)
692{
693 dma_addr_t dma;
694
695 be_dws_le_to_cpu(wrb, sizeof(*wrb));
696
697 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000698 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000699 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000700 dma_unmap_single(dev, dma, wrb->frag_len,
701 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000702 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000703 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000704 }
705}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706
/* DMA-map the skb (linear part + page frags) and fill TX WRBs on @txq.
 * Returns the number of payload bytes queued, or 0 on DMA mapping failure,
 * in which case all mappings done so far are undone and the queue head is
 * rewound to its value on entry... minus the hdr WRB, which the caller
 * rewinds by restoring its own saved head.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first WRB for the header; it is filled in last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point used by dma_err */

	if (skb->len > skb->data_len) {
		/* linear (headlen) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					 skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length pad WRB to make the WRB count even (see wrb_cnt_for_skb) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything mapped so far; only the first mapped WRB can be
	 * a single mapping (linear part), the rest are page mappings.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
773
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the pkt
 * data itself instead of relying on HW tagging. May re-allocate the skb;
 * returns NULL if tagging failed (the skb is freed by __vlan_put_tag() on
 * failure). Sets *skip_hw_vlan when the FW must skip HW VLAN insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkt on a QnQ port gets the port VLAN (pvid) */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the pkt data; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
816
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000817static bool be_ipv6_exthdr_check(struct sk_buff *skb)
818{
819 struct ethhdr *eh = (struct ethhdr *)skb->data;
820 u16 offset = ETH_HLEN;
821
822 if (eh->h_proto == htons(ETH_P_IPV6)) {
823 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
824
825 offset += sizeof(struct ipv6hdr);
826 if (ip6h->nexthdr != NEXTHDR_TCP &&
827 ip6h->nexthdr != NEXTHDR_UDP) {
828 struct ipv6_opt_hdr *ehdr =
829 (struct ipv6_opt_hdr *) (skb->data + offset);
830
831 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
832 if (ehdr->hdrlen == 0xff)
833 return true;
834 }
835 }
836 return false;
837}
838
839static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
840{
841 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
842}
843
Sathya Perlaee9c7992013-05-22 23:04:55 +0000844static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
845 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000846{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000847 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000848}
849
Sathya Perlaee9c7992013-05-22 23:04:55 +0000850static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
851 struct sk_buff *skb,
852 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000854 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000855 unsigned int eth_hdr_len;
856 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000857
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500858 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
Somnath Kotur48265662013-05-26 21:08:47 +0000859 * may cause a transmit stall on that port. So the work-around is to
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500860 * pad short packets (<= 32 bytes) to a 36-byte length.
Somnath Kotur48265662013-05-26 21:08:47 +0000861 */
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500862 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Somnath Kotur48265662013-05-26 21:08:47 +0000863 if (skb_padto(skb, 36))
864 goto tx_drop;
865 skb->len = 36;
866 }
867
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000868 /* For padded packets, BE HW modifies tot_len field in IP header
869 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000870 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000871 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000872 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
873 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000874 if (skb->len <= 60 &&
875 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000876 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000877 ip = (struct iphdr *)ip_hdr(skb);
878 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
879 }
880
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000881 /* If vlan tag is already inlined in the packet, skip HW VLAN
882 * tagging in UMC mode
883 */
884 if ((adapter->function_mode & UMC_ENABLED) &&
885 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000886 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000887
Somnath Kotur93040ae2012-06-26 22:32:10 +0000888 /* HW has a bug wherein it will calculate CSUM for VLAN
889 * pkts even though it is disabled.
890 * Manually insert VLAN in pkt.
891 */
892 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000893 vlan_tx_tag_present(skb)) {
894 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000895 if (unlikely(!skb))
896 goto tx_drop;
897 }
898
899 /* HW may lockup when VLAN HW tagging is requested on
900 * certain ipv6 packets. Drop such pkts if the HW workaround to
901 * skip HW tagging is not enabled by FW.
902 */
903 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000904 (adapter->pvid || adapter->qnq_vid) &&
905 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000906 goto tx_drop;
907
908 /* Manual VLAN tag insertion to prevent:
909 * ASIC lockup when the ASIC inserts VLAN tag into
910 * certain ipv6 packets. Insert VLAN tags in driver,
911 * and set event, completion, vlan bits accordingly
912 * in the Tx WRB.
913 */
914 if (be_ipv6_tx_stall_chk(adapter, skb) &&
915 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000916 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000917 if (unlikely(!skb))
918 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000919 }
920
Sathya Perlaee9c7992013-05-22 23:04:55 +0000921 return skb;
922tx_drop:
923 dev_kfree_skb_any(skb);
924 return NULL;
925}
926
/* ndo_start_xmit handler: apply TX workarounds, build WRBs and ring the
 * TX doorbell. Always returns NETDEV_TX_OK; the skb is freed on any drop.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* saved for rollback / sent_skb slot */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;	/* dropped/freed by the workarounds */

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		/* snapshot gso_segs: the skb may be freed by TX completion
		 * as soon as the doorbell is rung below
		 */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
972
973static int be_change_mtu(struct net_device *netdev, int new_mtu)
974{
975 struct be_adapter *adapter = netdev_priv(netdev);
976 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000977 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
978 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700979 dev_info(&adapter->pdev->dev,
980 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000981 BE_MIN_MTU,
982 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700983 return -EINVAL;
984 }
985 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
986 netdev->mtu, new_mtu);
987 netdev->mtu = new_mtu;
988 return 0;
989}
990
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* filter programming succeeded: if we had previously fallen
		 * back to VLAN promiscuous mode, turn it off again
		 */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	/* fallback: accept all VLANs instead of programming HW filters */
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1052
/* ndo_vlan_rx_add_vid: enable reception of VLAN @vid */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* on non-Lancer chips, only the physical function may program VLANs */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
1078
/* ndo_vlan_rx_kill_vid: disable reception of VLAN @vid */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* on non-Lancer chips, only the physical function may program VLANs */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= be_max_vlans(adapter))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* roll back on failure */
ret:
	return status;
}
1104
/* ndo_set_rx_mode: sync promiscuous/multicast/unicast filter state
 * from the netdev to the adapter's FW RX filters.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* unicast list changed: re-program all secondary MAC entries */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously-programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many unicast addrs for HW filters: go promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1166
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001167static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1168{
1169 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001170 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001171 int status;
1172
Sathya Perla11ac75e2011-12-13 00:58:50 +00001173 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001174 return -EPERM;
1175
Sathya Perla11ac75e2011-12-13 00:58:50 +00001176 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001177 return -EINVAL;
1178
Sathya Perla3175d8c2013-07-23 15:25:03 +05301179 if (BEx_chip(adapter)) {
1180 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1181 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001182
Sathya Perla11ac75e2011-12-13 00:58:50 +00001183 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1184 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301185 } else {
1186 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1187 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001188 }
1189
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001190 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001191 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1192 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001193 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001194 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001195
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001196 return status;
1197}
1198
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001199static int be_get_vf_config(struct net_device *netdev, int vf,
1200 struct ifla_vf_info *vi)
1201{
1202 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001203 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001204
Sathya Perla11ac75e2011-12-13 00:58:50 +00001205 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001206 return -EPERM;
1207
Sathya Perla11ac75e2011-12-13 00:58:50 +00001208 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001209 return -EINVAL;
1210
1211 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001212 vi->tx_rate = vf_cfg->tx_rate;
1213 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001214 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001215 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001216
1217 return 0;
1218}
1219
/* ndo_set_vf_vlan: set transparent VLAN tagging for VF @vf, or reset it
 * back to the default VID when @vlan is 0. Note: @qos is not programmed
 * into the HW by this implementation.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle, 0);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1254
Ajit Khapardee1d18732010-07-23 01:52:13 +00001255static int be_set_vf_tx_rate(struct net_device *netdev,
1256 int vf, int rate)
1257{
1258 struct be_adapter *adapter = netdev_priv(netdev);
1259 int status = 0;
1260
Sathya Perla11ac75e2011-12-13 00:58:50 +00001261 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001262 return -EPERM;
1263
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001264 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001265 return -EINVAL;
1266
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001267 if (rate < 100 || rate > 10000) {
1268 dev_err(&adapter->pdev->dev,
1269 "tx rate must be between 100 and 10000 Mbps\n");
1270 return -EINVAL;
1271 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001272
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001273 if (lancer_chip(adapter))
1274 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1275 else
1276 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001277
1278 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001279 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001280 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001281 else
1282 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001283 return status;
1284}
1285
/* Adaptive interrupt coalescing: recompute the EQ delay from the RX pkt
 * rate observed since the last update and program it if it changed.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just (re)program the statically configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to base AIC on */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* take a consistent snapshot of the 64-bit pkt counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* scale pkt rate into an eqd value, clamped to [min_eqd, max_eqd];
	 * very low rates get eqd = 0 (no coalescing delay)
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* issue the FW cmd only when the delay actually changes */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1334
Sathya Perla3abcded2010-10-03 22:12:27 -07001335static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001336 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001337{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001338 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001339
Sathya Perlaab1594e2011-07-25 19:10:15 +00001340 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001341 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001342 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001343 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001344 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001345 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001346 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001347 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001348 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349}
1350
Sathya Perla2e588f82011-03-11 02:49:26 +00001351static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001352{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001353 /* L4 checksum is not reliable for non TCP/UDP packets.
1354 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001355 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1356 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001357}
1358
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001359static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1360 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001361{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001362 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001364 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365
Sathya Perla3abcded2010-10-03 22:12:27 -07001366 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001367 BUG_ON(!rx_page_info->page);
1368
Ajit Khaparde205859a2010-02-09 01:34:21 +00001369 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001370 dma_unmap_page(&adapter->pdev->dev,
1371 dma_unmap_addr(rx_page_info, bus),
1372 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001373 rx_page_info->last_page_user = false;
1374 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375
1376 atomic_dec(&rxq->used);
1377 return rx_page_info;
1378}
1379
1380/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001381static void be_rx_compl_discard(struct be_rx_obj *rxo,
1382 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001383{
Sathya Perla3abcded2010-10-03 22:12:27 -07001384 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001386 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001388 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001389 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001390 put_page(page_info->page);
1391 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001392 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 }
1394}
1395
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The frame spans rxcp->num_rcvd RX fragments starting at rxcp->rxq_idx.
 * Tiny frames (<= BE_HDR_LEN) are copied entirely into the skb linear
 * area; larger frames get the Ethernet header copied linearly and the
 * payload attached as page fragments, with consecutive fragments from the
 * same physical page coalesced into one skb frag slot.
 * Side effect: advances rxcp->rxq_idx past the consumed fragments.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the rest of the first fragment becomes skb frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		/* Each fragment accounts one rx_frag_size of truesize */
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if kept) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment frame: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken at post time; frag j already holds one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1472
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the completion's fragments, set checksum /
 * rx-queue / rxhash / vlan metadata and hand it to the stack.
 * On skb allocation failure the frame's fragments are discarded and a
 * drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		/* Must still consume and free the posted RX fragments */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev offload is on and
	 * the completion's csum bits pass the csum_passed() checks.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1506
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the frame's page fragments directly to the napi GRO skb
 * (zero-copy) and feed it to napi_gro_frags().  Falls back to discarding
 * the fragments when no GRO skb is available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: starting at -1 (0xffff) it wraps to 0 on the first
	 * iteration's j++ (i == 0 always takes the "fresh slot" branch),
	 * so frags[] is never indexed before a slot is opened.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: slot j already holds
			 * a reference, release the duplicate.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only used for frames whose HW checksum passed */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1563
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001564static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1565 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566{
Sathya Perla2e588f82011-03-11 02:49:26 +00001567 rxcp->pkt_size =
1568 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1569 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1570 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1571 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001572 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001573 rxcp->ip_csum =
1574 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1575 rxcp->l4_csum =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1577 rxcp->ipv6 =
1578 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1579 rxcp->rxq_idx =
1580 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1581 rxcp->num_rcvd =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1583 rxcp->pkt_type =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001585 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001586 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001587 if (rxcp->vlanf) {
1588 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001589 compl);
1590 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1591 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001592 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001593 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001594}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595
/* Extract the fields of a v0 (legacy / non-be3-native) ethernet RX
 * completion entry into the driver's parsed rxcp form.  AMAP_GET_BITS()
 * pulls the named bit-field out of the HW descriptor layout.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1629
/* Fetch and parse the next valid RX completion from rxo's CQ, or return
 * NULL when none is pending.  The returned rxcp is rxo's single embedded
 * scratch struct (overwritten by the next call); the CQ entry itself is
 * invalidated and the tail advanced before returning.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the rest of the DMA'ed entry must not be read
	 * (speculatively) before the valid bit was observed set.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* be3_native adapters use the v1 completion layout, others v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE delivers the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port-vid tag from the stack unless the VLAN was
		 * explicitly configured on this interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1672
Eric Dumazet1829b082011-03-01 05:48:12 +00001673static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001676
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001678 gfp |= __GFP_COMP;
1679 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680}
1681
1682/*
1683 * Allocate a page, split it to fragments of size rx_frag_size and post as
1684 * receive buffers to BE
1685 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001686static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687{
Sathya Perla3abcded2010-10-03 22:12:27 -07001688 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001689 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001690 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001691 struct page *pagep = NULL;
1692 struct be_eth_rx_d *rxd;
1693 u64 page_dmaaddr = 0, frag_dmaaddr;
1694 u32 posted, page_offset = 0;
1695
Sathya Perla3abcded2010-10-03 22:12:27 -07001696 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1698 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001699 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001700 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001701 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001702 break;
1703 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001704 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1705 0, adapter->big_page_size,
1706 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001707 page_info->page_offset = 0;
1708 } else {
1709 get_page(pagep);
1710 page_info->page_offset = page_offset + rx_frag_size;
1711 }
1712 page_offset = page_info->page_offset;
1713 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001714 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1716
1717 rxd = queue_head_node(rxq);
1718 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1719 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720
1721 /* Any space left in the current big page for another frag? */
1722 if ((page_offset + rx_frag_size + rx_frag_size) >
1723 adapter->big_page_size) {
1724 pagep = NULL;
1725 page_info->last_page_user = true;
1726 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001727
1728 prev_page_info = page_info;
1729 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001730 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731 }
1732 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001733 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734
1735 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001736 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001737 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001738 } else if (atomic_read(&rxq->used) == 0) {
1739 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001740 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001741 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001742}
1743
/* Fetch the next valid TX completion from tx_cq, or NULL when none is
 * pending.  The entry is byte-swapped in place, invalidated, and the CQ
 * tail advanced before returning a pointer to it.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't read the DMA'ed entry before its valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Invalidate so this entry isn't picked up again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1759
/* Reclaim one transmitted skb: walk the TX queue from its tail up to and
 * including the WRB at last_index (taken from the TX completion),
 * DMA-unmapping each fragment, then free the skb.
 * Returns the number of WRBs consumed (including the header WRB) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header WRB */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data WRB maps the skb linear header (only when
		 * there is linear data); subsequent WRBs map page frags.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1791
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001792/* Return the number of events in the event queue */
1793static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001794{
1795 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001796 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001797
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001798 do {
1799 eqe = queue_tail_node(&eqo->q);
1800 if (eqe->evt == 0)
1801 break;
1802
1803 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001804 eqe->evt = 0;
1805 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001806 queue_tail_inc(&eqo->q);
1807 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001808
1809 return num;
1810}
1811
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001812/* Leaves the EQ is disarmed state */
1813static void be_eq_clean(struct be_eq_obj *eqo)
1814{
1815 int num = events_get(eqo);
1816
1817 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1818}
1819
/* Drain an RX queue during teardown: consume all pending completions
 * (discarding their data), wait for the HW flush completion, then free
 * any posted RX buffers that were never used and reset the ring.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error state */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* Zero num_rcvd marks the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1870
/* Drain all TX queues during teardown: reap completions for up to 200ms,
 * then forcibly free any still-posted skbs whose completions will never
 * arrive (walking the queue with wrb_cnt_for_skb() to find each skb's
 * last WRB).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the reaped completions to the HW */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute where this skb's last WRB sits so
			 * be_tx_compl_process() can walk and unmap it.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1929
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001930static void be_evt_queues_destroy(struct be_adapter *adapter)
1931{
1932 struct be_eq_obj *eqo;
1933 int i;
1934
1935 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001936 if (eqo->q.created) {
1937 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001938 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05301939 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001940 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001941 be_queue_free(adapter, &eqo->q);
1942 }
1943}
1944
/* Create one event queue per interrupt vector (capped by the configured
 * queue count). For each EQ: register its NAPI context, initialize the
 * EQ object fields, allocate the ring memory and issue the FW create
 * command. Returns 0 on success or a negative error code; partially
 * created EQs are cleaned up by be_evt_queues_destroy() on failure.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		/* adaptive interrupt coalescing is on by default */
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
1975
Sathya Perla5fb379e2009-06-18 00:02:59 +00001976static void be_mcc_queues_destroy(struct be_adapter *adapter)
1977{
1978 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001979
Sathya Perla8788fdc2009-07-27 22:52:03 +00001980 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001981 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001982 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001983 be_queue_free(adapter, q);
1984
Sathya Perla8788fdc2009-07-27 22:52:03 +00001985 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001986 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001987 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001988 be_queue_free(adapter, q);
1989}
1990
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate and create the MCC completion queue first, then the
	 * MCC WRB queue; each failure unwinds the steps taken so far via
	 * the goto chain below. Returns 0 on success, -1 on any failure.
	 */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* error unwind: free/destroy in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2023
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002024static void be_tx_queues_destroy(struct be_adapter *adapter)
2025{
2026 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002027 struct be_tx_obj *txo;
2028 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002029
Sathya Perla3c8def92011-06-12 20:01:58 +00002030 for_all_tx_queues(adapter, txo, i) {
2031 q = &txo->q;
2032 if (q->created)
2033 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2034 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035
Sathya Perla3c8def92011-06-12 20:01:58 +00002036 q = &txo->cq;
2037 if (q->created)
2038 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2039 be_queue_free(adapter, q);
2040 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002041}
2042
Sathya Perla77071332013-08-27 16:57:34 +05302043static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002044{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002045 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002046 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302047 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002048
Sathya Perla92bf14a2013-08-27 16:57:32 +05302049 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002050
Sathya Perla3c8def92011-06-12 20:01:58 +00002051 for_all_tx_queues(adapter, txo, i) {
2052 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002053 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2054 sizeof(struct be_eth_tx_compl));
2055 if (status)
2056 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002057
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002058 /* If num_evt_qs is less than num_tx_qs, then more than
2059 * one txq share an eq
2060 */
2061 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2062 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2063 if (status)
2064 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002065
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002066 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2067 sizeof(struct be_eth_wrb));
2068 if (status)
2069 return status;
2070
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002071 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002072 if (status)
2073 return status;
2074 }
2075
Sathya Perlad3791422012-09-28 04:39:44 +00002076 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2077 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002078 return 0;
2079}
2080
2081static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002082{
2083 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002084 struct be_rx_obj *rxo;
2085 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002086
Sathya Perla3abcded2010-10-03 22:12:27 -07002087 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002088 q = &rxo->cq;
2089 if (q->created)
2090 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2091 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002092 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002093}
2094
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002095static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002096{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002097 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002098 struct be_rx_obj *rxo;
2099 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002100
Sathya Perla92bf14a2013-08-27 16:57:32 +05302101 /* We can create as many RSS rings as there are EQs. */
2102 adapter->num_rx_qs = adapter->num_evt_qs;
2103
2104 /* We'll use RSS only if atleast 2 RSS rings are supported.
2105 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002106 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302107 if (adapter->num_rx_qs > 1)
2108 adapter->num_rx_qs++;
2109
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002110 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002111 for_all_rx_queues(adapter, rxo, i) {
2112 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002113 cq = &rxo->cq;
2114 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2115 sizeof(struct be_eth_rx_compl));
2116 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002117 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002118
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002119 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2120 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002121 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002122 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002123 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002124
Sathya Perlad3791422012-09-28 04:39:44 +00002125 dev_info(&adapter->pdev->dev,
2126 "created %d RSS queue(s) and 1 default RX queue\n",
2127 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002128 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002129}
2130
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002131static irqreturn_t be_intx(int irq, void *dev)
2132{
Sathya Perlae49cc342012-11-27 19:50:02 +00002133 struct be_eq_obj *eqo = dev;
2134 struct be_adapter *adapter = eqo->adapter;
2135 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002136
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002137 /* IRQ is not expected when NAPI is scheduled as the EQ
2138 * will not be armed.
2139 * But, this can happen on Lancer INTx where it takes
2140 * a while to de-assert INTx or in BE2 where occasionaly
2141 * an interrupt may be raised even when EQ is unarmed.
2142 * If NAPI is already scheduled, then counting & notifying
2143 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002144 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002145 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002146 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002147 __napi_schedule(&eqo->napi);
2148 if (num_evts)
2149 eqo->spurious_intr = 0;
2150 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002151 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002152
2153 /* Return IRQ_HANDLED only for the the first spurious intr
2154 * after a valid intr to stop the kernel from branding
2155 * this irq as a bad one!
2156 */
2157 if (num_evts || eqo->spurious_intr++ == 0)
2158 return IRQ_HANDLED;
2159 else
2160 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002161}
2162
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002163static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002164{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002165 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002166
Sathya Perla0b545a62012-11-23 00:27:18 +00002167 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2168 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169 return IRQ_HANDLED;
2170}
2171
Sathya Perla2e588f82011-03-11 02:49:26 +00002172static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002173{
Somnath Koture38b1702013-05-29 22:55:56 +00002174 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175}
2176
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002177static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2178 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179{
Sathya Perla3abcded2010-10-03 22:12:27 -07002180 struct be_adapter *adapter = rxo->adapter;
2181 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002182 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183 u32 work_done;
2184
2185 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002186 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002187 if (!rxcp)
2188 break;
2189
Sathya Perla12004ae2011-08-02 19:57:46 +00002190 /* Is it a flush compl that has no data */
2191 if (unlikely(rxcp->num_rcvd == 0))
2192 goto loop_continue;
2193
2194 /* Discard compl with partial DMA Lancer B0 */
2195 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002196 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002197 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002198 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002199
Sathya Perla12004ae2011-08-02 19:57:46 +00002200 /* On BE drop pkts that arrive due to imperfect filtering in
2201 * promiscuous mode on some skews
2202 */
2203 if (unlikely(rxcp->port != adapter->port_num &&
2204 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002205 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002206 goto loop_continue;
2207 }
2208
2209 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002210 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002211 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002212 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002213loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002214 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002215 }
2216
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002217 if (work_done) {
2218 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002219
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002220 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2221 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002222 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002224 return work_done;
2225}
2226
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002227static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2228 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002229{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002231 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002233 for (work_done = 0; work_done < budget; work_done++) {
2234 txcp = be_tx_compl_get(&txo->cq);
2235 if (!txcp)
2236 break;
2237 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002238 AMAP_GET_BITS(struct amap_eth_tx_compl,
2239 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002240 }
2241
2242 if (work_done) {
2243 be_cq_notify(adapter, txo->cq.id, true, work_done);
2244 atomic_sub(num_wrbs, &txo->q.used);
2245
2246 /* As Tx wrbs have been freed up, wake up netdev queue
2247 * if it was stopped due to lack of tx wrbs. */
2248 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2249 atomic_read(&txo->q.used) < txo->q.len / 2) {
2250 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002251 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002252
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002253 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2254 tx_stats(txo)->tx_compl += work_done;
2255 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2256 }
2257 return (work_done < budget); /* Done */
2258}
Sathya Perla3c8def92011-06-12 20:01:58 +00002259
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302260int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002261{
2262 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2263 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002264 int max_work = 0, work, i, num_evts;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002265 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002266
Sathya Perla0b545a62012-11-23 00:27:18 +00002267 num_evts = events_get(eqo);
2268
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002269 /* Process all TXQs serviced by this EQ */
2270 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2271 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2272 eqo->tx_budget, i);
2273 if (!tx_done)
2274 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002275 }
2276
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002277 /* This loop will iterate twice for EQ0 in which
2278 * completions of the last RXQ (default one) are also processed
2279 * For other EQs the loop iterates only once
2280 */
2281 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2282 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2283 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002284 }
2285
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002286 if (is_mcc_eqo(eqo))
2287 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002288
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002289 if (max_work < budget) {
2290 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002291 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292 } else {
2293 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002294 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002295 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002296 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002297}
2298
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002299void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002300{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002301 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2302 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002303 u32 i;
2304
Sathya Perlad23e9462012-12-17 19:38:51 +00002305 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002306 return;
2307
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002308 if (lancer_chip(adapter)) {
2309 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2310 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2311 sliport_err1 = ioread32(adapter->db +
2312 SLIPORT_ERROR1_OFFSET);
2313 sliport_err2 = ioread32(adapter->db +
2314 SLIPORT_ERROR2_OFFSET);
2315 }
2316 } else {
2317 pci_read_config_dword(adapter->pdev,
2318 PCICFG_UE_STATUS_LOW, &ue_lo);
2319 pci_read_config_dword(adapter->pdev,
2320 PCICFG_UE_STATUS_HIGH, &ue_hi);
2321 pci_read_config_dword(adapter->pdev,
2322 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2323 pci_read_config_dword(adapter->pdev,
2324 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002325
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002326 ue_lo = (ue_lo & ~ue_lo_mask);
2327 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002328 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002329
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002330 /* On certain platforms BE hardware can indicate spurious UEs.
2331 * Allow the h/w to stop working completely in case of a real UE.
2332 * Hence not setting the hw_error for UE detection.
2333 */
2334 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002335 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002336 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002337 "Error detected in the card\n");
2338 }
2339
2340 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2341 dev_err(&adapter->pdev->dev,
2342 "ERR: sliport status 0x%x\n", sliport_status);
2343 dev_err(&adapter->pdev->dev,
2344 "ERR: sliport error1 0x%x\n", sliport_err1);
2345 dev_err(&adapter->pdev->dev,
2346 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002347 }
2348
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002349 if (ue_lo) {
2350 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2351 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002352 dev_err(&adapter->pdev->dev,
2353 "UE: %s bit set\n", ue_status_low_desc[i]);
2354 }
2355 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002356
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002357 if (ue_hi) {
2358 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2359 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002360 dev_err(&adapter->pdev->dev,
2361 "UE: %s bit set\n", ue_status_hi_desc[i]);
2362 }
2363 }
2364
2365}
2366
Sathya Perla8d56ff12009-11-22 22:02:26 +00002367static void be_msix_disable(struct be_adapter *adapter)
2368{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002369 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002370 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002371 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302372 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002373 }
2374}
2375
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002376static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002377{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302378 int i, status, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002379 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380
Sathya Perla92bf14a2013-08-27 16:57:32 +05302381 /* If RoCE is supported, program the max number of NIC vectors that
2382 * may be configured via set-channels, along with vectors needed for
2383 * RoCe. Else, just program the number we'll use initially.
2384 */
2385 if (be_roce_supported(adapter))
2386 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2387 2 * num_online_cpus());
2388 else
2389 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002390
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002391 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002392 adapter->msix_entries[i].entry = i;
2393
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002394 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002395 if (status == 0) {
2396 goto done;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302397 } else if (status >= MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002398 num_vec = status;
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002399 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2400 num_vec);
2401 if (!status)
Sathya Perla3abcded2010-10-03 22:12:27 -07002402 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002403 }
Sathya Perlad3791422012-09-28 04:39:44 +00002404
2405 dev_warn(dev, "MSIx enable failed\n");
Sathya Perla92bf14a2013-08-27 16:57:32 +05302406
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002407 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2408 if (!be_physfn(adapter))
2409 return status;
2410 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002411done:
Sathya Perla92bf14a2013-08-27 16:57:32 +05302412 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2413 adapter->num_msix_roce_vec = num_vec / 2;
2414 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2415 adapter->num_msix_roce_vec);
2416 }
2417
2418 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2419
2420 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2421 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002422 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002423}
2424
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002425static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002426 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002427{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302428 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002429}
2430
/* Request one MSI-x irq per event queue, naming each after the netdev
 * and its queue index. On failure, frees every irq registered so far
 * (walking the EQ array backwards), disables MSI-x and returns the
 * request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free the irqs of the EQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2454
/* Register the adapter's interrupt handler(s). Prefers MSI-x; falls
 * back to a shared INTx line on PFs if MSI-x registration fails
 * (VFs cannot use INTx, so the MSI-x error is returned for them).
 * Marks the adapter's isr_registered flag on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2482
2483static void be_irq_unregister(struct be_adapter *adapter)
2484{
2485 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002486 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002487 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002488
2489 if (!adapter->isr_registered)
2490 return;
2491
2492 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002493 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002494 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002495 goto done;
2496 }
2497
2498 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002499 for_all_evt_queues(adapter, eqo, i)
2500 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002501
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002502done:
2503 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002504}
2505
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002506static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002507{
2508 struct be_queue_info *q;
2509 struct be_rx_obj *rxo;
2510 int i;
2511
2512 for_all_rx_queues(adapter, rxo, i) {
2513 q = &rxo->q;
2514 if (q->created) {
2515 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002516 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002517 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002518 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002519 }
2520}
2521
Sathya Perla889cd4b2010-05-30 23:33:45 +00002522static int be_close(struct net_device *netdev)
2523{
2524 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002525 struct be_eq_obj *eqo;
2526 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002527
Parav Pandit045508a2012-03-26 14:27:13 +00002528 be_roce_dev_close(adapter);
2529
Somnath Kotur04d3d622013-05-02 03:36:55 +00002530 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2531 for_all_evt_queues(adapter, eqo, i)
2532 napi_disable(&eqo->napi);
2533 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2534 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002535
2536 be_async_mcc_disable(adapter);
2537
2538 /* Wait for all pending tx completions to arrive so that
2539 * all tx skbs are freed.
2540 */
Sathya Perlafba87552013-05-08 02:05:50 +00002541 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302542 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002543
2544 be_rx_qs_destroy(adapter);
2545
2546 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002547 if (msix_enabled(adapter))
2548 synchronize_irq(be_msix_vec_get(adapter, eqo));
2549 else
2550 synchronize_irq(netdev->irq);
2551 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002552 }
2553
Sathya Perla889cd4b2010-05-30 23:33:45 +00002554 be_irq_unregister(adapter);
2555
Sathya Perla482c9e72011-06-29 23:33:17 +00002556 return 0;
2557}
2558
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002559static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002560{
2561 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002562 int rc, i, j;
2563 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002564
2565 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002566 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2567 sizeof(struct be_eth_rx_d));
2568 if (rc)
2569 return rc;
2570 }
2571
2572 /* The FW would like the default RXQ to be created first */
2573 rxo = default_rxo(adapter);
2574 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2575 adapter->if_handle, false, &rxo->rss_id);
2576 if (rc)
2577 return rc;
2578
2579 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002580 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002581 rx_frag_size, adapter->if_handle,
2582 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002583 if (rc)
2584 return rc;
2585 }
2586
2587 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002588 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2589 for_all_rss_queues(adapter, rxo, i) {
2590 if ((j + i) >= 128)
2591 break;
2592 rsstable[j + i] = rxo->rss_id;
2593 }
2594 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002595 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2596 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2597
2598 if (!BEx_chip(adapter))
2599 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2600 RSS_ENABLE_UDP_IPV6;
2601
2602 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2603 128);
2604 if (rc) {
2605 adapter->rss_flags = 0;
Sathya Perla482c9e72011-06-29 23:33:17 +00002606 return rc;
Suresh Reddy594ad542013-04-25 23:03:20 +00002607 }
Sathya Perla482c9e72011-06-29 23:33:17 +00002608 }
2609
2610 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002611 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002612 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002613 return 0;
2614}
2615
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002616static int be_open(struct net_device *netdev)
2617{
2618 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002619 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002620 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002621 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002622 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002623 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002624
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002625 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002626 if (status)
2627 goto err;
2628
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002629 status = be_irq_register(adapter);
2630 if (status)
2631 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002632
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002633 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002634 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002635
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002636 for_all_tx_queues(adapter, txo, i)
2637 be_cq_notify(adapter, txo->cq.id, true, 0);
2638
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002639 be_async_mcc_enable(adapter);
2640
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002641 for_all_evt_queues(adapter, eqo, i) {
2642 napi_enable(&eqo->napi);
2643 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2644 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002645 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002646
Sathya Perla323ff712012-09-28 04:39:43 +00002647 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002648 if (!status)
2649 be_link_status_update(adapter, link_status);
2650
Sathya Perlafba87552013-05-08 02:05:50 +00002651 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002652 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002653 return 0;
2654err:
2655 be_close(adapter->netdev);
2656 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002657}
2658
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002659static int be_setup_wol(struct be_adapter *adapter, bool enable)
2660{
2661 struct be_dma_mem cmd;
2662 int status = 0;
2663 u8 mac[ETH_ALEN];
2664
2665 memset(mac, 0, ETH_ALEN);
2666
2667 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002668 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2669 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002670 if (cmd.va == NULL)
2671 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002672
2673 if (enable) {
2674 status = pci_write_config_dword(adapter->pdev,
2675 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2676 if (status) {
2677 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002678 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002679 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2680 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002681 return status;
2682 }
2683 status = be_cmd_enable_magic_wol(adapter,
2684 adapter->netdev->dev_addr, &cmd);
2685 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2686 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2687 } else {
2688 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2689 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2690 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2691 }
2692
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002693 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002694 return status;
2695}
2696
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002697/*
2698 * Generate a seed MAC address from the PF MAC Address using jhash.
2699 * MAC Address for VFs are assigned incrementally starting from the seed.
2700 * These addresses are programmed in the ASIC by the PF and the VF driver
2701 * queries for the MAC address during its probe.
2702 */
Sathya Perla4c876612013-02-03 20:30:11 +00002703static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002704{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002705 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002706 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002707 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002708 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002709
2710 be_vf_eth_addr_generate(adapter, mac);
2711
Sathya Perla11ac75e2011-12-13 00:58:50 +00002712 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302713 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002714 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002715 vf_cfg->if_handle,
2716 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302717 else
2718 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2719 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002720
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002721 if (status)
2722 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002723 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002724 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002725 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002726
2727 mac[5] += 1;
2728 }
2729 return status;
2730}
2731
Sathya Perla4c876612013-02-03 20:30:11 +00002732static int be_vfs_mac_query(struct be_adapter *adapter)
2733{
2734 int status, vf;
2735 u8 mac[ETH_ALEN];
2736 struct be_vf_cfg *vf_cfg;
Sathya Perla95046b92013-07-23 15:25:02 +05302737 bool active = false;
Sathya Perla4c876612013-02-03 20:30:11 +00002738
2739 for_all_vfs(adapter, vf_cfg, vf) {
2740 be_cmd_get_mac_from_list(adapter, mac, &active,
2741 &vf_cfg->pmac_id, 0);
2742
2743 status = be_cmd_mac_addr_query(adapter, mac, false,
2744 vf_cfg->if_handle, 0);
2745 if (status)
2746 return status;
2747 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2748 }
2749 return 0;
2750}
2751
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002752static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002753{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002754 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002755 u32 vf;
2756
Sathya Perla257a3fe2013-06-14 15:54:51 +05302757 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002758 dev_warn(&adapter->pdev->dev,
2759 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002760 goto done;
2761 }
2762
Sathya Perlab4c1df92013-05-08 02:05:47 +00002763 pci_disable_sriov(adapter->pdev);
2764
Sathya Perla11ac75e2011-12-13 00:58:50 +00002765 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302766 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002767 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2768 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302769 else
2770 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2771 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002772
Sathya Perla11ac75e2011-12-13 00:58:50 +00002773 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2774 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002775done:
2776 kfree(adapter->vf_cfg);
2777 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002778}
2779
Sathya Perla77071332013-08-27 16:57:34 +05302780static void be_clear_queues(struct be_adapter *adapter)
2781{
2782 be_mcc_queues_destroy(adapter);
2783 be_rx_cqs_destroy(adapter);
2784 be_tx_queues_destroy(adapter);
2785 be_evt_queues_destroy(adapter);
2786}
2787
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302788static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002789{
Sathya Perla191eb752012-02-23 18:50:13 +00002790 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2791 cancel_delayed_work_sync(&adapter->work);
2792 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2793 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302794}
2795
/* Tears the adapter down: stops the worker, clears VFs, deletes all
 * programmed MACs, destroys the interface and all queues, and disables
 * MSI-x. Inverse of be_setup(). Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2821
Sathya Perla4c876612013-02-03 20:30:11 +00002822static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002823{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302824 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002825 struct be_vf_cfg *vf_cfg;
2826 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002827 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002828
Sathya Perla4c876612013-02-03 20:30:11 +00002829 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2830 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002831
Sathya Perla4c876612013-02-03 20:30:11 +00002832 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302833 if (!BE3_chip(adapter)) {
2834 status = be_cmd_get_profile_config(adapter, &res,
2835 vf + 1);
2836 if (!status)
2837 cap_flags = res.if_cap_flags;
2838 }
Sathya Perla4c876612013-02-03 20:30:11 +00002839
2840 /* If a FW profile exists, then cap_flags are updated */
2841 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2842 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2843 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2844 &vf_cfg->if_handle, vf + 1);
2845 if (status)
2846 goto err;
2847 }
2848err:
2849 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002850}
2851
Sathya Perla39f1d942012-05-08 19:41:24 +00002852static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002853{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002854 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002855 int vf;
2856
Sathya Perla39f1d942012-05-08 19:41:24 +00002857 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2858 GFP_KERNEL);
2859 if (!adapter->vf_cfg)
2860 return -ENOMEM;
2861
Sathya Perla11ac75e2011-12-13 00:58:50 +00002862 for_all_vfs(adapter, vf_cfg, vf) {
2863 vf_cfg->if_handle = -1;
2864 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002865 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002866 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002867}
2868
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002869static int be_vf_setup(struct be_adapter *adapter)
2870{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002871 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002872 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002873 int status, old_vfs, vf;
2874 struct device *dev = &adapter->pdev->dev;
Sathya Perla04a06022013-07-23 15:25:00 +05302875 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002876
Sathya Perla257a3fe2013-06-14 15:54:51 +05302877 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00002878 if (old_vfs) {
2879 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2880 if (old_vfs != num_vfs)
2881 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2882 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002883 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302884 if (num_vfs > be_max_vfs(adapter))
Sathya Perla4c876612013-02-03 20:30:11 +00002885 dev_info(dev, "Device supports %d VFs and not %d\n",
Sathya Perla92bf14a2013-08-27 16:57:32 +05302886 be_max_vfs(adapter), num_vfs);
2887 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
Sathya Perlab4c1df92013-05-08 02:05:47 +00002888 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00002889 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00002890 }
2891
2892 status = be_vf_setup_init(adapter);
2893 if (status)
2894 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002895
Sathya Perla4c876612013-02-03 20:30:11 +00002896 if (old_vfs) {
2897 for_all_vfs(adapter, vf_cfg, vf) {
2898 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2899 if (status)
2900 goto err;
2901 }
2902 } else {
2903 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002904 if (status)
2905 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002906 }
2907
Sathya Perla4c876612013-02-03 20:30:11 +00002908 if (old_vfs) {
2909 status = be_vfs_mac_query(adapter);
2910 if (status)
2911 goto err;
2912 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002913 status = be_vf_eth_addr_config(adapter);
2914 if (status)
2915 goto err;
2916 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002917
Sathya Perla11ac75e2011-12-13 00:58:50 +00002918 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05302919 /* Allow VFs to programs MAC/VLAN filters */
2920 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2921 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2922 status = be_cmd_set_fn_privileges(adapter,
2923 privileges |
2924 BE_PRIV_FILTMGMT,
2925 vf + 1);
2926 if (!status)
2927 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2928 vf);
2929 }
2930
Sathya Perla4c876612013-02-03 20:30:11 +00002931 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2932 * Allow full available bandwidth
2933 */
2934 if (BE3_chip(adapter) && !old_vfs)
2935 be_cmd_set_qos(adapter, 1000, vf+1);
2936
2937 status = be_cmd_link_status_query(adapter, &lnk_speed,
2938 NULL, vf + 1);
2939 if (!status)
2940 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002941
2942 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05002943 vf + 1, vf_cfg->if_handle, NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002944 if (status)
2945 goto err;
2946 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002947
2948 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002949 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00002950
2951 if (!old_vfs) {
2952 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2953 if (status) {
2954 dev_err(dev, "SRIOV enable failed\n");
2955 adapter->num_vfs = 0;
2956 goto err;
2957 }
2958 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002959 return 0;
2960err:
Sathya Perla4c876612013-02-03 20:30:11 +00002961 dev_err(dev, "VF setup failed\n");
2962 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002963 return status;
2964}
2965
Sathya Perla92bf14a2013-08-27 16:57:32 +05302966/* On BE2/BE3 FW does not suggest the supported limits */
2967static void BEx_get_resources(struct be_adapter *adapter,
2968 struct be_resources *res)
2969{
2970 struct pci_dev *pdev = adapter->pdev;
2971 bool use_sriov = false;
2972
2973 if (BE3_chip(adapter) && be_physfn(adapter)) {
2974 int max_vfs;
2975
2976 max_vfs = pci_sriov_get_totalvfs(pdev);
2977 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2978 use_sriov = res->max_vfs && num_vfs;
2979 }
2980
2981 if (be_physfn(adapter))
2982 res->max_uc_mac = BE_UC_PMAC_COUNT;
2983 else
2984 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2985
2986 if (adapter->function_mode & FLEX10_MODE)
2987 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde1aa96732013-09-27 15:18:16 -05002988 else if (adapter->function_mode & UMC_ENABLED)
2989 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302990 else
2991 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2992 res->max_mcast_mac = BE_MAX_MC;
2993
2994 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2995 !be_physfn(adapter))
2996 res->max_tx_qs = 1;
2997 else
2998 res->max_tx_qs = BE3_MAX_TX_QS;
2999
3000 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3001 !use_sriov && be_physfn(adapter))
3002 res->max_rss_qs = (adapter->be3_native) ?
3003 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3004 res->max_rx_qs = res->max_rss_qs + 1;
3005
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303006 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303007
3008 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3009 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3010 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3011}
3012
Sathya Perla30128032011-11-10 19:17:57 +00003013static void be_setup_init(struct be_adapter *adapter)
3014{
3015 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003016 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003017 adapter->if_handle = -1;
3018 adapter->be3_native = false;
3019 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003020 if (be_physfn(adapter))
3021 adapter->cmd_privileges = MAX_PRIVILEGES;
3022 else
3023 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003024}
3025
Sathya Perla92bf14a2013-08-27 16:57:32 +05303026static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003027{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303028 struct device *dev = &adapter->pdev->dev;
3029 struct be_resources res = {0};
3030 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003031
Sathya Perla92bf14a2013-08-27 16:57:32 +05303032 if (BEx_chip(adapter)) {
3033 BEx_get_resources(adapter, &res);
3034 adapter->res = res;
3035 }
3036
3037 /* For BE3 only check if FW suggests a different max-txqs value */
3038 if (BE3_chip(adapter)) {
3039 status = be_cmd_get_profile_config(adapter, &res, 0);
3040 if (!status && res.max_tx_qs)
3041 adapter->res.max_tx_qs =
3042 min(adapter->res.max_tx_qs, res.max_tx_qs);
3043 }
3044
3045 /* For Lancer, SH etc read per-function resource limits from FW.
3046 * GET_FUNC_CONFIG returns per function guaranteed limits.
3047 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3048 */
Sathya Perla4c876612013-02-03 20:30:11 +00003049 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303050 status = be_cmd_get_func_config(adapter, &res);
3051 if (status)
3052 return status;
3053
3054 /* If RoCE may be enabled stash away half the EQs for RoCE */
3055 if (be_roce_supported(adapter))
3056 res.max_evt_qs /= 2;
3057 adapter->res = res;
3058
3059 if (be_physfn(adapter)) {
3060 status = be_cmd_get_profile_config(adapter, &res, 0);
3061 if (status)
3062 return status;
3063 adapter->res.max_vfs = res.max_vfs;
3064 }
3065
3066 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3067 be_max_txqs(adapter), be_max_rxqs(adapter),
3068 be_max_rss(adapter), be_max_eqs(adapter),
3069 be_max_vfs(adapter));
3070 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3071 be_max_uc(adapter), be_max_mc(adapter),
3072 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003073 }
3074
Sathya Perla92bf14a2013-08-27 16:57:32 +05303075 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003076}
3077
Sathya Perla39f1d942012-05-08 19:41:24 +00003078/* Routine to query per function resource limits */
3079static int be_get_config(struct be_adapter *adapter)
3080{
Sathya Perla4c876612013-02-03 20:30:11 +00003081 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003082
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003083 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3084 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003085 &adapter->function_caps,
3086 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003087 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303088 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003089
Sathya Perla92bf14a2013-08-27 16:57:32 +05303090 status = be_get_resources(adapter);
3091 if (status)
3092 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003093
3094 /* primary mac needs 1 pmac entry */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303095 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3096 GFP_KERNEL);
3097 if (!adapter->pmac_id)
3098 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003099
Sathya Perla92bf14a2013-08-27 16:57:32 +05303100 /* Sanitize cfg_num_qs based on HW and platform limits */
3101 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3102
3103 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003104}
3105
Sathya Perla95046b92013-07-23 15:25:02 +05303106static int be_mac_setup(struct be_adapter *adapter)
3107{
3108 u8 mac[ETH_ALEN];
3109 int status;
3110
3111 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3112 status = be_cmd_get_perm_mac(adapter, mac);
3113 if (status)
3114 return status;
3115
3116 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3117 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3118 } else {
3119 /* Maybe the HW was reset; dev_addr must be re-programmed */
3120 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3121 }
3122
3123 /* On BE3 VFs this cmd may fail due to lack of privilege.
3124 * Ignore the failure as in this case pmac_id is fetched
3125 * in the IFACE_CREATE cmd.
3126 */
3127 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3128 &adapter->pmac_id[0], 0);
3129 return 0;
3130}
3131
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303132static void be_schedule_worker(struct be_adapter *adapter)
3133{
3134 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3135 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3136}
3137
Sathya Perla77071332013-08-27 16:57:34 +05303138static int be_setup_queues(struct be_adapter *adapter)
3139{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303140 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303141 int status;
3142
3143 status = be_evt_queues_create(adapter);
3144 if (status)
3145 goto err;
3146
3147 status = be_tx_qs_create(adapter);
3148 if (status)
3149 goto err;
3150
3151 status = be_rx_cqs_create(adapter);
3152 if (status)
3153 goto err;
3154
3155 status = be_mcc_queues_create(adapter);
3156 if (status)
3157 goto err;
3158
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303159 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3160 if (status)
3161 goto err;
3162
3163 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3164 if (status)
3165 goto err;
3166
Sathya Perla77071332013-08-27 16:57:34 +05303167 return 0;
3168err:
3169 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3170 return status;
3171}
3172
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303173int be_update_queues(struct be_adapter *adapter)
3174{
3175 struct net_device *netdev = adapter->netdev;
3176 int status;
3177
3178 if (netif_running(netdev))
3179 be_close(netdev);
3180
3181 be_cancel_worker(adapter);
3182
3183 /* If any vectors have been shared with RoCE we cannot re-program
3184 * the MSIx table.
3185 */
3186 if (!adapter->num_msix_roce_vec)
3187 be_msix_disable(adapter);
3188
3189 be_clear_queues(adapter);
3190
3191 if (!msix_enabled(adapter)) {
3192 status = be_msix_enable(adapter);
3193 if (status)
3194 return status;
3195 }
3196
3197 status = be_setup_queues(adapter);
3198 if (status)
3199 return status;
3200
3201 be_schedule_worker(adapter);
3202
3203 if (netif_running(netdev))
3204 status = be_open(netdev);
3205
3206 return status;
3207}
3208
Sathya Perla5fb379e2009-06-18 00:02:59 +00003209static int be_setup(struct be_adapter *adapter)
3210{
Sathya Perla39f1d942012-05-08 19:41:24 +00003211 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303212 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003213 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003214
Sathya Perla30128032011-11-10 19:17:57 +00003215 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003216
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003217 if (!lancer_chip(adapter))
3218 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003219
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003220 status = be_get_config(adapter);
3221 if (status)
3222 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003223
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003224 status = be_msix_enable(adapter);
3225 if (status)
3226 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003227
Sathya Perla77071332013-08-27 16:57:34 +05303228 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3229 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3230 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3231 en_flags |= BE_IF_FLAGS_RSS;
3232 en_flags = en_flags & be_if_cap_flags(adapter);
3233 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3234 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003235 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003236 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003237
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303238 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3239 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303240 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303241 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003242 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003243 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003244
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003245 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3246 /* In UMC mode FW does not return right privileges.
3247 * Override with correct privilege equivalent to PF.
3248 */
3249 if (be_is_mc(adapter))
3250 adapter->cmd_privileges = MAX_PRIVILEGES;
3251
Sathya Perla95046b92013-07-23 15:25:02 +05303252 status = be_mac_setup(adapter);
3253 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003254 goto err;
3255
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003256 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003257
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003258 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003259 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003260
3261 be_set_rx_mode(adapter->netdev);
3262
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003263 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003264
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003265 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3266 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003267 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003268
Sathya Perla92bf14a2013-08-27 16:57:32 +05303269 if (be_physfn(adapter) && num_vfs) {
3270 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003271 be_vf_setup(adapter);
3272 else
3273 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003274 }
3275
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003276 status = be_cmd_get_phy_info(adapter);
3277 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003278 adapter->phy.fc_autoneg = 1;
3279
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303280 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003281 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003282err:
3283 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003284 return status;
3285}
3286
Ivan Vecera66268732011-12-08 01:31:21 +00003287#ifdef CONFIG_NET_POLL_CONTROLLER
3288static void be_netpoll(struct net_device *netdev)
3289{
3290 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003291 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003292 int i;
3293
Sathya Perlae49cc342012-11-27 19:50:02 +00003294 for_all_evt_queues(adapter, eqo, i) {
3295 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3296 napi_schedule(&eqo->napi);
3297 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003298
3299 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003300}
3301#endif
3302
Ajit Khaparde84517482009-09-04 03:12:16 +00003303#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003304static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003305
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003306static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003307 const u8 *p, u32 img_start, int image_size,
3308 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003309{
3310 u32 crc_offset;
3311 u8 flashed_crc[4];
3312 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003313
3314 crc_offset = hdr_size + img_start + image_size - 4;
3315
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003316 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003317
3318 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003319 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003320 if (status) {
3321 dev_err(&adapter->pdev->dev,
3322 "could not get crc from flash, not flashing redboot\n");
3323 return false;
3324 }
3325
3326 /*update redboot only if crc does not match*/
3327 if (!memcmp(flashed_crc, p, 4))
3328 return false;
3329 else
3330 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003331}
3332
Sathya Perla306f1342011-08-02 19:57:45 +00003333static bool phy_flashing_required(struct be_adapter *adapter)
3334{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003335 return (adapter->phy.phy_type == TN_8022 &&
3336 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003337}
3338
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003339static bool is_comp_in_ufi(struct be_adapter *adapter,
3340 struct flash_section_info *fsec, int type)
3341{
3342 int i = 0, img_type = 0;
3343 struct flash_section_info_g2 *fsec_g2 = NULL;
3344
Sathya Perlaca34fe32012-11-06 17:48:56 +00003345 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003346 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3347
3348 for (i = 0; i < MAX_FLASH_COMP; i++) {
3349 if (fsec_g2)
3350 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3351 else
3352 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3353
3354 if (img_type == type)
3355 return true;
3356 }
3357 return false;
3358
3359}
3360
Jingoo Han4188e7d2013-08-05 18:02:02 +09003361static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003362 int header_size,
3363 const struct firmware *fw)
3364{
3365 struct flash_section_info *fsec = NULL;
3366 const u8 *p = fw->data;
3367
3368 p += header_size;
3369 while (p < (fw->data + fw->size)) {
3370 fsec = (struct flash_section_info *)p;
3371 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3372 return fsec;
3373 p += 32;
3374 }
3375 return NULL;
3376}
3377
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003378static int be_flash(struct be_adapter *adapter, const u8 *img,
3379 struct be_dma_mem *flash_cmd, int optype, int img_size)
3380{
3381 u32 total_bytes = 0, flash_op, num_bytes = 0;
3382 int status = 0;
3383 struct be_cmd_write_flashrom *req = flash_cmd->va;
3384
3385 total_bytes = img_size;
3386 while (total_bytes) {
3387 num_bytes = min_t(u32, 32*1024, total_bytes);
3388
3389 total_bytes -= num_bytes;
3390
3391 if (!total_bytes) {
3392 if (optype == OPTYPE_PHY_FW)
3393 flash_op = FLASHROM_OPER_PHY_FLASH;
3394 else
3395 flash_op = FLASHROM_OPER_FLASH;
3396 } else {
3397 if (optype == OPTYPE_PHY_FW)
3398 flash_op = FLASHROM_OPER_PHY_SAVE;
3399 else
3400 flash_op = FLASHROM_OPER_SAVE;
3401 }
3402
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003403 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003404 img += num_bytes;
3405 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3406 flash_op, num_bytes);
3407 if (status) {
3408 if (status == ILLEGAL_IOCTL_REQ &&
3409 optype == OPTYPE_PHY_FW)
3410 break;
3411 dev_err(&adapter->pdev->dev,
3412 "cmd to write to flash rom failed.\n");
3413 return status;
3414 }
3415 }
3416 return 0;
3417}
3418
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003419/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003420static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003421 const struct firmware *fw,
3422 struct be_dma_mem *flash_cmd,
3423 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003424
Ajit Khaparde84517482009-09-04 03:12:16 +00003425{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003426 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003427 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003428 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003429 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003430 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003431 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003432
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003433 struct flash_comp gen3_flash_types[] = {
3434 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3435 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3436 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3437 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3438 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3439 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3440 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3441 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3442 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3443 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3444 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3445 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3446 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3447 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3448 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3449 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3450 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3451 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3452 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3453 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003454 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003455
3456 struct flash_comp gen2_flash_types[] = {
3457 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3458 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3459 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3460 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3461 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3462 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3463 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3464 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3465 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3466 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3467 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3468 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3469 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3470 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3471 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3472 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003473 };
3474
Sathya Perlaca34fe32012-11-06 17:48:56 +00003475 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003476 pflashcomp = gen3_flash_types;
3477 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003478 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003479 } else {
3480 pflashcomp = gen2_flash_types;
3481 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003482 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003483 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003484
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003485 /* Get flash section info*/
3486 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3487 if (!fsec) {
3488 dev_err(&adapter->pdev->dev,
3489 "Invalid Cookie. UFI corrupted ?\n");
3490 return -1;
3491 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003492 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003493 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003494 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003495
3496 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3497 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3498 continue;
3499
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003500 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3501 !phy_flashing_required(adapter))
3502 continue;
3503
3504 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3505 redboot = be_flash_redboot(adapter, fw->data,
3506 pflashcomp[i].offset, pflashcomp[i].size,
3507 filehdr_size + img_hdrs_size);
3508 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003509 continue;
3510 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003511
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003512 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003513 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003514 if (p + pflashcomp[i].size > fw->data + fw->size)
3515 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003516
3517 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3518 pflashcomp[i].size);
3519 if (status) {
3520 dev_err(&adapter->pdev->dev,
3521 "Flashing section type %d failed.\n",
3522 pflashcomp[i].img_type);
3523 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003524 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003525 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003526 return 0;
3527}
3528
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003529static int be_flash_skyhawk(struct be_adapter *adapter,
3530 const struct firmware *fw,
3531 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003532{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003533 int status = 0, i, filehdr_size = 0;
3534 int img_offset, img_size, img_optype, redboot;
3535 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3536 const u8 *p = fw->data;
3537 struct flash_section_info *fsec = NULL;
3538
3539 filehdr_size = sizeof(struct flash_file_hdr_g3);
3540 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3541 if (!fsec) {
3542 dev_err(&adapter->pdev->dev,
3543 "Invalid Cookie. UFI corrupted ?\n");
3544 return -1;
3545 }
3546
3547 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3548 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3549 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3550
3551 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3552 case IMAGE_FIRMWARE_iSCSI:
3553 img_optype = OPTYPE_ISCSI_ACTIVE;
3554 break;
3555 case IMAGE_BOOT_CODE:
3556 img_optype = OPTYPE_REDBOOT;
3557 break;
3558 case IMAGE_OPTION_ROM_ISCSI:
3559 img_optype = OPTYPE_BIOS;
3560 break;
3561 case IMAGE_OPTION_ROM_PXE:
3562 img_optype = OPTYPE_PXE_BIOS;
3563 break;
3564 case IMAGE_OPTION_ROM_FCoE:
3565 img_optype = OPTYPE_FCOE_BIOS;
3566 break;
3567 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3568 img_optype = OPTYPE_ISCSI_BACKUP;
3569 break;
3570 case IMAGE_NCSI:
3571 img_optype = OPTYPE_NCSI_FW;
3572 break;
3573 default:
3574 continue;
3575 }
3576
3577 if (img_optype == OPTYPE_REDBOOT) {
3578 redboot = be_flash_redboot(adapter, fw->data,
3579 img_offset, img_size,
3580 filehdr_size + img_hdrs_size);
3581 if (!redboot)
3582 continue;
3583 }
3584
3585 p = fw->data;
3586 p += filehdr_size + img_offset + img_hdrs_size;
3587 if (p + img_size > fw->data + fw->size)
3588 return -1;
3589
3590 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3591 if (status) {
3592 dev_err(&adapter->pdev->dev,
3593 "Flashing section type %d failed.\n",
3594 fsec->fsec_entry[i].type);
3595 return status;
3596 }
3597 }
3598 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003599}
3600
/* Download a firmware image to a Lancer adapter.
 * The image is streamed in 32KB chunks through a DMA-coherent buffer via
 * WRITE_OBJECT commands, then committed with a zero-length write. Depending
 * on the commit's change_status, the function either triggers an in-band FW
 * reset or tells the user a reboot is needed for the new FW to take effect.
 * Returns 0 on success or a negative/FW error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* Lancer requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the command header plus one 32KB chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* stream the image; the FW reports how much it consumed each time */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* activate new FW: in-band reset if the FW supports it, else the
	 * user must reboot
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3696
Sathya Perlaca34fe32012-11-06 17:48:56 +00003697#define UFI_TYPE2 2
3698#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003699#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003700#define UFI_TYPE4 4
3701static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003702 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003703{
3704 if (fhdr == NULL)
3705 goto be_get_ufi_exit;
3706
Sathya Perlaca34fe32012-11-06 17:48:56 +00003707 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3708 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003709 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3710 if (fhdr->asic_type_rev == 0x10)
3711 return UFI_TYPE3R;
3712 else
3713 return UFI_TYPE3;
3714 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003715 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003716
3717be_get_ufi_exit:
3718 dev_err(&adapter->pdev->dev,
3719 "UFI and Interface are not compatible for flashing\n");
3720 return -1;
3721}
3722
/* Flash a (non-Lancer) UFI firmware file.
 * Determines the UFI flavour from the file header, then dispatches to the
 * chip-appropriate flashing routine for the image whose imageid == 1.
 * UFI_TYPE2 images carry no per-image headers and are flashed outside the
 * loop. Returns 0 on success, -ENOMEM, -1 on incompatible UFI, or the
 * flashing routine's error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			/* no default case: UFI_TYPE2 and -1 are handled
			 * after the loop
			 */
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3791
3792int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3793{
3794 const struct firmware *fw;
3795 int status;
3796
3797 if (!netif_running(adapter->netdev)) {
3798 dev_err(&adapter->pdev->dev,
3799 "Firmware load not allowed (interface is down)\n");
3800 return -1;
3801 }
3802
3803 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3804 if (status)
3805 goto fw_exit;
3806
3807 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3808
3809 if (lancer_chip(adapter))
3810 status = lancer_fw_download(adapter, fw);
3811 else
3812 status = be_fw_download(adapter, fw);
3813
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003814 if (!status)
3815 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3816 adapter->fw_on_flash);
3817
Ajit Khaparde84517482009-09-04 03:12:16 +00003818fw_exit:
3819 release_firmware(fw);
3820 return status;
3821}
3822
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003823static int be_ndo_bridge_setlink(struct net_device *dev,
3824 struct nlmsghdr *nlh)
3825{
3826 struct be_adapter *adapter = netdev_priv(dev);
3827 struct nlattr *attr, *br_spec;
3828 int rem;
3829 int status = 0;
3830 u16 mode = 0;
3831
3832 if (!sriov_enabled(adapter))
3833 return -EOPNOTSUPP;
3834
3835 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3836
3837 nla_for_each_nested(attr, br_spec, rem) {
3838 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3839 continue;
3840
3841 mode = nla_get_u16(attr);
3842 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3843 return -EINVAL;
3844
3845 status = be_cmd_set_hsw_config(adapter, 0, 0,
3846 adapter->if_handle,
3847 mode == BRIDGE_MODE_VEPA ?
3848 PORT_FWD_TYPE_VEPA :
3849 PORT_FWD_TYPE_VEB);
3850 if (status)
3851 goto err;
3852
3853 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3854 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3855
3856 return status;
3857 }
3858err:
3859 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3860 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3861
3862 return status;
3863}
3864
3865static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3866 struct net_device *dev,
3867 u32 filter_mask)
3868{
3869 struct be_adapter *adapter = netdev_priv(dev);
3870 int status = 0;
3871 u8 hsw_mode;
3872
3873 if (!sriov_enabled(adapter))
3874 return 0;
3875
3876 /* BE and Lancer chips support VEB mode only */
3877 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3878 hsw_mode = PORT_FWD_TYPE_VEB;
3879 } else {
3880 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3881 adapter->if_handle, &hsw_mode);
3882 if (status)
3883 return 0;
3884 }
3885
3886 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3887 hsw_mode == PORT_FWD_TYPE_VEPA ?
3888 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3889}
3890
/* net_device_ops table wiring the stack's entry points to this driver;
 * installed on the netdev by be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
};
3912
/* One-time netdev setup: advertise offload features, install the
 * net_device_ops and ethtool_ops tables, and set GSO limits.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* user-toggleable offloads: scatter-gather, TSO, checksum offload,
	 * RX checksum and VLAN tag insertion
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN stripping/filtering are always on (not user-toggleable) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* cap GSO so frame + Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
3939
3940static void be_unmap_pci_bars(struct be_adapter *adapter)
3941{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003942 if (adapter->csr)
3943 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003944 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003945 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003946}
3947
/* BAR holding the doorbell registers: BAR 0 on Lancer chips and on VFs,
 * BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3955
3956static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003957{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003958 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003959 adapter->roce_db.size = 4096;
3960 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3961 db_bar(adapter));
3962 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3963 db_bar(adapter));
3964 }
Parav Pandit045508a2012-03-26 14:27:13 +00003965 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003966}
3967
3968static int be_map_pci_bars(struct be_adapter *adapter)
3969{
3970 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003971 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003972
Sathya Perlace66f782012-11-06 17:48:58 +00003973 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3974 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3975 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003976
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003977 if (BEx_chip(adapter) && be_physfn(adapter)) {
3978 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3979 if (adapter->csr == NULL)
3980 return -ENOMEM;
3981 }
3982
Sathya Perlace66f782012-11-06 17:48:58 +00003983 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003984 if (addr == NULL)
3985 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003986 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003987
3988 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003989 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003990
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003991pci_map_err:
3992 be_unmap_pci_bars(adapter);
3993 return -ENOMEM;
3994}
3995
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003996static void be_ctrl_cleanup(struct be_adapter *adapter)
3997{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003998 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003999
4000 be_unmap_pci_bars(adapter);
4001
4002 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004003 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4004 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004005
Sathya Perla5b8821b2011-08-02 19:57:44 +00004006 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004007 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004008 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4009 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004010}
4011
/* Initialize the adapter's control path: decode SLI_INTF, map PCI BARs,
 * allocate the (16-byte aligned) mailbox and the rx-filter DMA buffers,
 * and initialize the mailbox/MCC locks. On failure, unwinds via the
 * goto-cleanup chain. Returns 0 or a negative error.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* derive SLI family and PF/VF role from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the used region can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4070
4071static void be_stats_cleanup(struct be_adapter *adapter)
4072{
Sathya Perla3abcded2010-10-03 22:12:27 -07004073 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004074
4075 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004076 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4077 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004078}
4079
4080static int be_stats_init(struct be_adapter *adapter)
4081{
Sathya Perla3abcded2010-10-03 22:12:27 -07004082 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004083
Sathya Perlaca34fe32012-11-06 17:48:56 +00004084 if (lancer_chip(adapter))
4085 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4086 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004087 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004088 else
4089 /* BE3 and Skyhawk */
4090 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4091
Joe Perchesede23fa82013-08-26 22:45:23 -07004092 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4093 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004094 if (cmd->va == NULL)
4095 return -1;
4096 return 0;
4097}
4098
Bill Pemberton3bc6b062012-12-03 09:23:09 -05004099static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004100{
4101 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004102
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004103 if (!adapter)
4104 return;
4105
Parav Pandit045508a2012-03-26 14:27:13 +00004106 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004107 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00004108
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004109 cancel_delayed_work_sync(&adapter->func_recovery_work);
4110
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004111 unregister_netdev(adapter->netdev);
4112
Sathya Perla5fb379e2009-06-18 00:02:59 +00004113 be_clear(adapter);
4114
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004115 /* tell fw we're done with firing cmds */
4116 be_cmd_fw_clean(adapter);
4117
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004118 be_stats_cleanup(adapter);
4119
4120 be_ctrl_cleanup(adapter);
4121
Sathya Perlad6b6d982012-09-05 01:56:48 +00004122 pci_disable_pcie_error_reporting(pdev);
4123
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004124 pci_set_drvdata(pdev, NULL);
4125 pci_release_regions(pdev);
4126 pci_disable_device(pdev);
4127
4128 free_netdev(adapter->netdev);
4129}
4130
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004131bool be_is_wol_supported(struct be_adapter *adapter)
4132{
4133 return ((adapter->wol_cap & BE_WOL_CAP) &&
4134 !be_is_wol_excluded(adapter)) ? true : false;
4135}
4136
Somnath Kotur941a77d2012-05-17 22:59:03 +00004137u32 be_get_fw_log_level(struct be_adapter *adapter)
4138{
4139 struct be_dma_mem extfat_cmd;
4140 struct be_fat_conf_params *cfgs;
4141 int status;
4142 u32 level = 0;
4143 int j;
4144
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004145 if (lancer_chip(adapter))
4146 return 0;
4147
Somnath Kotur941a77d2012-05-17 22:59:03 +00004148 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4149 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4150 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4151 &extfat_cmd.dma);
4152
4153 if (!extfat_cmd.va) {
4154 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4155 __func__);
4156 goto err;
4157 }
4158
4159 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4160 if (!status) {
4161 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4162 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00004163 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00004164 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4165 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4166 }
4167 }
4168 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4169 extfat_cmd.dma);
4170err:
4171 return level;
4172}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004173
Sathya Perla39f1d942012-05-08 19:41:24 +00004174static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004175{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004176 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00004177 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004178
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004179 status = be_cmd_get_cntl_attributes(adapter);
4180 if (status)
4181 return status;
4182
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004183 status = be_cmd_get_acpi_wol_cap(adapter);
4184 if (status) {
4185 /* in case of a failure to get wol capabillities
4186 * check the exclusion list to determine WOL capability */
4187 if (!be_is_wol_excluded(adapter))
4188 adapter->wol_cap |= BE_WOL_CAP;
4189 }
4190
4191 if (be_is_wol_supported(adapter))
4192 adapter->wol = true;
4193
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004194 /* Must be a power of 2 or else MODULO will BUG_ON */
4195 adapter->be_get_temp_freq = 64;
4196
Somnath Kotur941a77d2012-05-17 22:59:03 +00004197 level = be_get_fw_log_level(adapter);
4198 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4199
Sathya Perla92bf14a2013-08-27 16:57:32 +05304200 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004201 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004202}
4203
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004204static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004205{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004206 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004207 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004208
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004209 status = lancer_test_and_set_rdy_state(adapter);
4210 if (status)
4211 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004212
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004213 if (netif_running(adapter->netdev))
4214 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004215
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004216 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004217
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004218 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004219
4220 status = be_setup(adapter);
4221 if (status)
4222 goto err;
4223
4224 if (netif_running(adapter->netdev)) {
4225 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004226 if (status)
4227 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004228 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004229
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004230 dev_err(dev, "Error recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004231 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004232err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004233 if (status == -EAGAIN)
4234 dev_err(dev, "Waiting for resource provisioning\n");
4235 else
4236 dev_err(dev, "Error recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004237
4238 return status;
4239}
4240
/* Periodic (1s) error-detection worker. On Lancer chips that have hit a
 * HW error, detach the netdev (under rtnl) and attempt recovery via
 * lancer_recover_func(), reattaching on success.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4267
/* Periodic (1s) housekeeping worker: reaps MCC completions when the
 * interface is down, kicks off async stats requests, samples die
 * temperature, replenishes starved RX rings, and updates EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF only, and only every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Repost RX buffers for rings that ran dry under memory
		 * pressure */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4311
Sathya Perla257a3fe2013-06-14 15:54:51 +05304312/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004313static bool be_reset_required(struct be_adapter *adapter)
4314{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304315 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004316}
4317
Sathya Perlad3791422012-09-28 04:39:44 +00004318static char *mc_name(struct be_adapter *adapter)
4319{
4320 if (adapter->function_mode & FLEX10_MODE)
4321 return "FLEX10";
4322 else if (adapter->function_mode & VNIC_MODE)
4323 return "vNIC";
4324 else if (adapter->function_mode & UMC_ENABLED)
4325 return "UMC";
4326 else
4327 return "";
4328}
4329
/* Printable role of this PCI function: physical ("PF") or virtual ("VF"). */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4334
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004335static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004336{
4337 int status = 0;
4338 struct be_adapter *adapter;
4339 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004340 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004341
4342 status = pci_enable_device(pdev);
4343 if (status)
4344 goto do_none;
4345
4346 status = pci_request_regions(pdev, DRV_NAME);
4347 if (status)
4348 goto disable_dev;
4349 pci_set_master(pdev);
4350
Sathya Perla7f640062012-06-05 19:37:20 +00004351 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004352 if (netdev == NULL) {
4353 status = -ENOMEM;
4354 goto rel_reg;
4355 }
4356 adapter = netdev_priv(netdev);
4357 adapter->pdev = pdev;
4358 pci_set_drvdata(pdev, adapter);
4359 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004360 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004361
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004362 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004363 if (!status) {
Craig Hada2bd92cd2013-04-21 23:28:18 +00004364 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4365 if (status < 0) {
4366 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4367 goto free_netdev;
4368 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004369 netdev->features |= NETIF_F_HIGHDMA;
4370 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004371 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Somnath Kotur0c5fed02013-06-11 17:18:22 +05304372 if (!status)
4373 status = dma_set_coherent_mask(&pdev->dev,
4374 DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004375 if (status) {
4376 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4377 goto free_netdev;
4378 }
4379 }
4380
Sathya Perlad6b6d982012-09-05 01:56:48 +00004381 status = pci_enable_pcie_error_reporting(pdev);
4382 if (status)
Ivan Vecera4ce1fd62013-07-25 16:10:55 +02004383 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00004384
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004385 status = be_ctrl_init(adapter);
4386 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004387 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004388
Sathya Perla2243e2e2009-11-22 22:02:03 +00004389 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004390 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004391 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004392 if (status)
4393 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004394 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004395
Sathya Perla39f1d942012-05-08 19:41:24 +00004396 if (be_reset_required(adapter)) {
4397 status = be_cmd_reset_function(adapter);
4398 if (status)
4399 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004400
Kalesh AP2d177be2013-04-28 22:22:29 +00004401 /* Wait for interrupts to quiesce after an FLR */
4402 msleep(100);
4403 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004404
4405 /* Allow interrupts for other ULPs running on NIC function */
4406 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004407
Kalesh AP2d177be2013-04-28 22:22:29 +00004408 /* tell fw we're ready to fire cmds */
4409 status = be_cmd_fw_init(adapter);
4410 if (status)
4411 goto ctrl_clean;
4412
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004413 status = be_stats_init(adapter);
4414 if (status)
4415 goto ctrl_clean;
4416
Sathya Perla39f1d942012-05-08 19:41:24 +00004417 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004418 if (status)
4419 goto stats_clean;
4420
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004421 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004422 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004423 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004424
Sathya Perla5fb379e2009-06-18 00:02:59 +00004425 status = be_setup(adapter);
4426 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004427 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004428
Sathya Perla3abcded2010-10-03 22:12:27 -07004429 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004430 status = register_netdev(netdev);
4431 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004432 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004433
Parav Pandit045508a2012-03-26 14:27:13 +00004434 be_roce_dev_add(adapter);
4435
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004436 schedule_delayed_work(&adapter->func_recovery_work,
4437 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004438
4439 be_cmd_query_port_name(adapter, &port_name);
4440
Sathya Perlad3791422012-09-28 04:39:44 +00004441 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4442 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004443
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004444 return 0;
4445
Sathya Perla5fb379e2009-06-18 00:02:59 +00004446unsetup:
4447 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004448stats_clean:
4449 be_stats_cleanup(adapter);
4450ctrl_clean:
4451 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004452free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004453 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004454 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004455rel_reg:
4456 pci_release_regions(pdev);
4457disable_dev:
4458 pci_disable_device(pdev);
4459do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004460 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004461 return status;
4462}
4463
/* Legacy PM suspend callback: arm WOL if enabled, stop the recovery
 * worker, quiesce and tear down the data path, then power the PCI
 * device down to the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4487
/* Legacy PM resume callback: re-enable the PCI device, wait for the FW
 * to be ready, re-init the command path, rebuild the function with
 * be_setup()/be_open(), restart the recovery worker, and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4528
Sathya Perla82456b02010-02-17 01:35:37 +00004529/*
4530 * An FLR will stop BE from DMAing any data.
4531 */
4532static void be_shutdown(struct pci_dev *pdev)
4533{
4534 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004535
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004536 if (!adapter)
4537 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004538
Sathya Perla0f4a6822011-03-21 20:49:28 +00004539 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004540 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004541
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004542 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004543
Ajit Khaparde57841862011-04-06 18:08:43 +00004544 be_cmd_reset_function(adapter);
4545
Sathya Perla82456b02010-02-17 01:35:37 +00004546 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004547}
4548
Sathya Perlacf588472010-02-14 21:22:01 +00004549static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4550 pci_channel_state_t state)
4551{
4552 struct be_adapter *adapter = pci_get_drvdata(pdev);
4553 struct net_device *netdev = adapter->netdev;
4554
4555 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4556
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004557 if (!adapter->eeh_error) {
4558 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004559
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004560 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004561
Sathya Perlacf588472010-02-14 21:22:01 +00004562 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004563 netif_device_detach(netdev);
4564 if (netif_running(netdev))
4565 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004566 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004567
4568 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004569 }
Sathya Perlacf588472010-02-14 21:22:01 +00004570
4571 if (state == pci_channel_io_perm_failure)
4572 return PCI_ERS_RESULT_DISCONNECT;
4573
4574 pci_disable_device(pdev);
4575
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004576 /* The error could cause the FW to trigger a flash debug dump.
4577 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004578 * can cause it not to recover; wait for it to finish.
4579 * Wait only for first function as it is needed only once per
4580 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004581 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004582 if (pdev->devfn == 0)
4583 ssleep(30);
4584
Sathya Perlacf588472010-02-14 21:22:01 +00004585 return PCI_ERS_RESULT_NEED_RESET;
4586}
4587
/* EEH/AER slot_reset callback: re-enable the freshly reset PCI device,
 * restore its state, wait for FW readiness, then clear AER status and
 * the driver's recorded error flags. Returns RECOVERED on success or
 * DISCONNECT when the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4614
/* EEH/AER resume callback: after a successful slot reset, reset and
 * re-init the function, rebuild it with be_setup()/be_open(), restart
 * the recovery worker, and reattach the netdev. Failures are only
 * logged — there is no further recovery path here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4651
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004652static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004653 .error_detected = be_eeh_err_detected,
4654 .slot_reset = be_eeh_reset,
4655 .resume = be_eeh_resume,
4656};
4657
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004658static struct pci_driver be_driver = {
4659 .name = DRV_NAME,
4660 .id_table = be_dev_ids,
4661 .probe = be_probe,
4662 .remove = be_remove,
4663 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004664 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004665 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004666 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004667};
4668
4669static int __init be_init_module(void)
4670{
Joe Perches8e95a202009-12-03 07:58:21 +00004671 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4672 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004673 printk(KERN_WARNING DRV_NAME
4674 " : Module param rx_frag_size must be 2048/4096/8192."
4675 " Using 2048\n");
4676 rx_frag_size = 2048;
4677 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004678
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004679 return pci_register_driver(&be_driver);
4680}
4681module_init(be_init_module);
4682
/* Module unload: unregister the PCI driver, which invokes be_remove()
 * for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);