blob: 2c38cc402119c763021ea77461455fd0fa8ac035 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070025
26MODULE_VERSION(DRV_VER);
27MODULE_DEVICE_TABLE(pci, be_dev_ids);
28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000029MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030MODULE_LICENSE("GPL");
31
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla11ac75e2011-12-13 00:58:50 +000036static ushort rx_frag_size = 2048;
37module_param(rx_frag_size, ushort, S_IRUGO);
38MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
Sathya Perla6b7c5b92009-03-11 23:32:03 -070040static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070049 { 0 }
50};
51MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name for each bit position, used when
 * decoding an unrecoverable-error (UE) report from the hardware.
 * Trailing spaces in some names are preserved as originally defined.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: bit-position names for the high word of the
 * unrecoverable-error status register; "Unknown" marks undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700122
Sathya Perla752961a2011-10-24 02:45:03 +0000123/* Is BE in a multi-channel mode */
124static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128}
129
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000136 mem->va = NULL;
137 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138}
139
140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142{
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000152 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153 return 0;
154}
155
Somnath Kotur68c45a22013-03-14 02:42:07 +0000156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Sathya Perladb3ea782011-08-22 19:41:52 +0000160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173}
174
Somnath Kotur68c45a22013-03-14 02:42:07 +0000175static void be_intr_set(struct be_adapter *adapter, bool enable)
176{
177 int status = 0;
178
179 /* On lancer interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189}
190
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192{
193 u32 val = 0;
194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000207
208 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210}
211
Sathya Perla8788fdc2009-07-27 22:52:03 +0000212static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 bool arm, bool clear_int, u16 num_popped)
214{
215 u32 val = 0;
216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
218 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000219
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000220 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000221 return;
222
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700223 if (arm)
224 val |= 1 << DB_EQ_REARM_SHIFT;
225 if (clear_int)
226 val |= 1 << DB_EQ_CLR_SHIFT;
227 val |= 1 << DB_EQ_EVNT_SHIFT;
228 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233{
234 u32 val = 0;
235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
/* ndo_set_mac_address handler: program a new MAC via FW, delete the old
 * PMAC entry, and confirm activation by querying the FW before committing
 * the address to the netdev.  Returns 0 or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
301
Sathya Perlaca34fe32012-11-06 17:48:56 +0000302/* BE2 supports only v0 cmd */
303static void *hw_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307
308 return &cmd->hw_stats;
309 } else {
310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311
312 return &cmd->hw_stats;
313 }
314}
315
316/* BE2 supports only v0 cmd */
317static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
318{
319 if (BE2_chip(adapter)) {
320 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
321
322 return &hw_stats->erx;
323 } else {
324 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
325
326 return &hw_stats->erx;
327 }
328}
329
/* Copy the v0 (BE2) FW stats response into the driver's unified
 * be_drv_stats layout, after byte-swapping the DMA buffer in place.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port section for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filter drops separately; combine */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counts per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
378
/* Copy the v1 (BE3/Skyhawk) FW stats response into the driver's unified
 * be_drv_stats layout, after byte-swapping the DMA buffer in place.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port section for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
424
/* Copy the Lancer per-port (pport) FW stats response into the driver's
 * unified be_drv_stats layout.  Lancer exposes 64-bit counters; only the
 * low dwords (*_lo) are folded into the 32-bit driver counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW returns little-endian dwords; convert in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filter drops separately; combine */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000463
Sathya Perla09c1c682011-08-22 19:41:53 +0000464static void accumulate_16bit_val(u32 *acc, u16 val)
465{
466#define lo(x) (x & 0xFFFF)
467#define hi(x) (x & 0xFFFF0000)
468 bool wrapped = val < lo(*acc);
469 u32 newacc = hi(*acc) + val;
470
471 if (wrapped)
472 newacc += 65536;
473 ACCESS_ONCE(*acc) = newacc;
474}
475
Jingoo Han4188e7d2013-08-05 18:02:02 +0900476static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000477 struct be_rx_obj *rxo,
478 u32 erx_stat)
479{
480 if (!BEx_chip(adapter))
481 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
482 else
483 /* below erx HW counter can actually wrap around after
484 * 65535. Driver accumulates a 32-bit value
485 */
486 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
487 (u16)erx_stat);
488}
489
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000490void be_parse_stats(struct be_adapter *adapter)
491{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000492 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
493 struct be_rx_obj *rxo;
494 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000495 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000496
Sathya Perlaca34fe32012-11-06 17:48:56 +0000497 if (lancer_chip(adapter)) {
498 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000499 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000500 if (BE2_chip(adapter))
501 populate_be_v0_stats(adapter);
502 else
503 /* for BE3 and Skyhawk */
504 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000505
Sathya Perlaca34fe32012-11-06 17:48:56 +0000506 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
507 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000508 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
509 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000510 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000511 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000512}
513
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters (read
 * consistently via the u64_stats seqcount) and derive the standard rtnl
 * error counters from the driver's FW-populated stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if a writer updated the counters
		 * while we were sampling them
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same seqcount-consistent read for the TX side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
579
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000580void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700582 struct net_device *netdev = adapter->netdev;
583
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000584 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000585 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000586 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000588
589 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
590 netif_carrier_on(netdev);
591 else
592 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700593}
594
/* Account one transmit request on @txo's stats, inside the u64_stats
 * writer section so 64-bit counters read consistently on 32-bit hosts.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a GSO skb counts as gso_segs wire packets; others count as 1 */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
609
610/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000611static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
612 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700614 int cnt = (skb->len > skb->data_len);
615
616 cnt += skb_shinfo(skb)->nr_frags;
617
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700618 /* to account for hdr wrb */
619 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000620 if (lancer_chip(adapter) || !(cnt & 1)) {
621 *dummy = false;
622 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700623 /* add a dummy to make it an even num */
624 cnt++;
625 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000626 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700627 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
628 return cnt;
629}
630
/* Fill one ethernet WRB with the (split 64-bit) DMA address and length of
 * a TX buffer fragment.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
638
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000639static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
640 struct sk_buff *skb)
641{
642 u8 vlan_prio;
643 u16 vlan_tag;
644
645 vlan_tag = vlan_tx_tag_get(skb);
646 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
647 /* If vlan priority provided by OS is NOT in available bmap */
648 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
649 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
650 adapter->recommended_prio;
651
652 return vlan_tag;
653}
654
/* Fill the TX header WRB for @skb: LSO/checksum offload flags, VLAN tag,
 * total WRB count and payload length.
 * @skip_hw_vlan: when true, request event without completion (evt=1,
 *		  compl=0), which tells the FW to skip HW VLAN insertion.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not set on Lancer chips — NOTE(review):
		 * presumably Lancer FW does not use it; confirm.
		 */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Request L4 checksum offload for TCP/UDP packets */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
689
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000690static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000691 bool unmap_single)
692{
693 dma_addr_t dma;
694
695 be_dws_le_to_cpu(wrb, sizeof(*wrb));
696
697 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000698 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000699 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000700 dma_unmap_single(dev, dma, wrb->frag_len,
701 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000702 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000703 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000704 }
705}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706
/* Build the WRB chain for @skb in @txq: a header WRB, one WRB per
 * DMA-mapped fragment (linear head first, then paged frags) and an
 * optional zero-length dummy WRB. On a mapping failure every mapping made
 * so far is undone and the queue head is rewound.
 *
 * Returns the number of data bytes mapped (programmed as the header WRB
 * 'len'), or 0 on DMA mapping failure.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first entry for the header WRB; it is filled last,
	 * once the total mapped length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point for the error path */

	/* Map the linear (head) portion of the skb, if it holds data */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length WRB to keep the total WRB count even (pre-Lancer HW) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the head, then unmap every WRB queued so far.
	 * Only the first mapping can be a dma_map_single() one.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
773
/* Insert the VLAN tag (and the outer QnQ tag, if configured) directly into
 * the packet data instead of relying on HW tagging. May set *skip_hw_vlan
 * to tell the caller to suppress HW VLAN insertion for this packet.
 *
 * Returns the (possibly reallocated) skb, or NULL on failure;
 * __vlan_put_tag() frees the skb itself when it fails.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* The packet data is modified below; ensure we own the skb */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* No tag from the stack: fall back to the port vlan id */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag now lives in the frame; clear the out-of-band tag so
		 * the HW does not insert it a second time.
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
816
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000817static bool be_ipv6_exthdr_check(struct sk_buff *skb)
818{
819 struct ethhdr *eh = (struct ethhdr *)skb->data;
820 u16 offset = ETH_HLEN;
821
822 if (eh->h_proto == htons(ETH_P_IPV6)) {
823 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
824
825 offset += sizeof(struct ipv6hdr);
826 if (ip6h->nexthdr != NEXTHDR_TCP &&
827 ip6h->nexthdr != NEXTHDR_UDP) {
828 struct ipv6_opt_hdr *ehdr =
829 (struct ipv6_opt_hdr *) (skb->data + offset);
830
831 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
832 if (ehdr->hdrlen == 0xff)
833 return true;
834 }
835 }
836 return false;
837}
838
839static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
840{
841 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
842}
843
Sathya Perlaee9c7992013-05-22 23:04:55 +0000844static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
845 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000846{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000847 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000848}
849
/* Apply the chip-specific TX errata workarounds to @skb before it is
 * queued: short-packet padding, trimming of padded IPv4 packets, manual
 * VLAN insertion and IPv6-exthdr stall avoidance.
 *
 * Returns the (possibly reallocated) skb, or NULL if the packet was
 * dropped; *skip_hw_vlan is set when the caller must disable HW VLAN
 * insertion for this packet.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the packet back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
926
/* ndo_start_xmit handler: apply HW workarounds, build the WRB chain, stop
 * the subqueue (before ringing the doorbell) if it cannot hold another
 * max-sized skb, then notify the HW. Always returns NETDEV_TX_OK; the skb
 * is freed on any failure.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		/* Read gso_segs before the doorbell: once the HW sees the
		 * WRBs a completion may free the skb at any moment.
		 */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
972
973static int be_change_mtu(struct net_device *netdev, int new_mtu)
974{
975 struct be_adapter *adapter = netdev_priv(netdev);
976 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000977 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
978 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700979 dev_info(&adapter->pdev->dev,
980 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000981 BE_MIN_MTU,
982 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700983 return -EINVAL;
984 }
985 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
986 netdev->mtu, new_mtu);
987 netdev->mtu = new_mtu;
988 return 0;
989}
990
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		/* NOTE(review): 'status' here is the command's base return
		 * code; comparing it against an *additional*-status constant
		 * looks suspect — confirm against the MCC status encoding
		 * in be_cmds.h.
		 */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* Filter programming succeeded: if we were in VLAN promisc
		 * because of an earlier failure, turn it back off.
		 */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Too many vids (or filter programming failed): accept all VLANs */
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1052
Patrick McHardy80d5c362013-04-19 02:04:28 +00001053static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001054{
1055 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001056 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001057
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001058
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001059 /* Packets with VID 0 are always received by Lancer by default */
1060 if (lancer_chip(adapter) && vid == 0)
1061 goto ret;
1062
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001063 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301064 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001065 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001066
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001067 if (!status)
1068 adapter->vlans_added++;
1069 else
1070 adapter->vlan_tag[vid] = 0;
1071ret:
1072 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001073}
1074
Patrick McHardy80d5c362013-04-19 02:04:28 +00001075static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001076{
1077 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001078 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001079
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001080 /* Packets with VID 0 are always received by Lancer by default */
1081 if (lancer_chip(adapter) && vid == 0)
1082 goto ret;
1083
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001084 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301085 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001086 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001087
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001088 if (!status)
1089 adapter->vlans_added--;
1090 else
1091 adapter->vlan_tag[vid] = 1;
1092ret:
1093 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094}
1095
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast RX
 * filtering to match the netdev flags and address lists, falling back to
 * the corresponding promiscuous mode when HW filter slots run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters dropped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Drop all previously-programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addrs than HW slots: fall back to promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1157
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001158static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1159{
1160 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001161 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001162 int status;
1163
Sathya Perla11ac75e2011-12-13 00:58:50 +00001164 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001165 return -EPERM;
1166
Sathya Perla11ac75e2011-12-13 00:58:50 +00001167 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001168 return -EINVAL;
1169
Sathya Perla3175d8c2013-07-23 15:25:03 +05301170 if (BEx_chip(adapter)) {
1171 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1172 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001173
Sathya Perla11ac75e2011-12-13 00:58:50 +00001174 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1175 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301176 } else {
1177 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1178 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001179 }
1180
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001181 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001182 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1183 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001184 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001185 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001186
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001187 return status;
1188}
1189
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001190static int be_get_vf_config(struct net_device *netdev, int vf,
1191 struct ifla_vf_info *vi)
1192{
1193 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001194 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001195
Sathya Perla11ac75e2011-12-13 00:58:50 +00001196 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001197 return -EPERM;
1198
Sathya Perla11ac75e2011-12-13 00:58:50 +00001199 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001200 return -EINVAL;
1201
1202 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001203 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001204 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1205 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001206 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001207
1208 return 0;
1209}
1210
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001211static int be_set_vf_vlan(struct net_device *netdev,
1212 int vf, u16 vlan, u8 qos)
1213{
1214 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001215 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001216 int status = 0;
1217
Sathya Perla11ac75e2011-12-13 00:58:50 +00001218 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001219 return -EPERM;
1220
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001221 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001222 return -EINVAL;
1223
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001224 if (vlan || qos) {
1225 vlan |= qos << VLAN_PRIO_SHIFT;
1226 if (vf_cfg->vlan_tag != vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001227 /* If this is new value, program it. Else skip. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001228 vf_cfg->vlan_tag = vlan;
1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1230 vf_cfg->if_handle, 0);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001231 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001232 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001233 /* Reset Transparent Vlan Tagging. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001234 vf_cfg->vlan_tag = 0;
1235 vlan = vf_cfg->def_vid;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001236 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001237 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001238 }
1239
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001240
1241 if (status)
1242 dev_info(&adapter->pdev->dev,
1243 "VLAN %d config on VF %d failed\n", vlan, vf);
1244 return status;
1245}
1246
Ajit Khapardee1d18732010-07-23 01:52:13 +00001247static int be_set_vf_tx_rate(struct net_device *netdev,
1248 int vf, int rate)
1249{
1250 struct be_adapter *adapter = netdev_priv(netdev);
1251 int status = 0;
1252
Sathya Perla11ac75e2011-12-13 00:58:50 +00001253 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001254 return -EPERM;
1255
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001256 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001257 return -EINVAL;
1258
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001259 if (rate < 100 || rate > 10000) {
1260 dev_err(&adapter->pdev->dev,
1261 "tx rate must be between 100 and 10000 Mbps\n");
1262 return -EINVAL;
1263 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001264
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001265 if (lancer_chip(adapter))
1266 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1267 else
1268 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001269
1270 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001271 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001272 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001273 else
1274 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001275 return status;
1276}
1277
/* Adaptive interrupt coalescing: recompute the EQ delay for @eqo from the
 * RX packet rate (re-sampled at most once a second) and program it into
 * the HW when it changes. With AIC disabled, simply enforces the static
 * eqd value.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot rx_pkts consistently w.r.t. 64-bit stats writers */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale eqd with pkt rate, clamp to [min_eqd, max_eqd]; very low
	 * rates get eqd 0 (no delay).
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Program the HW only when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1326
Sathya Perla3abcded2010-10-03 22:12:27 -07001327static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001328 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001329{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001330 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001331
Sathya Perlaab1594e2011-07-25 19:10:15 +00001332 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001333 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001334 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001335 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001336 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001337 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001338 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001339 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001340 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001341}
1342
Sathya Perla2e588f82011-03-11 02:49:26 +00001343static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001344{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001345 /* L4 checksum is not reliable for non TCP/UDP packets.
1346 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001347 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1348 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001349}
1350
/* Look up the page-info entry for @frag_idx in @rxo's RX queue and hand
 * ownership of that fragment to the caller.  A DMA mapping covers one
 * "big page" shared by several fragments, so it is unmapped only when
 * the last user of the page is consumed.  Decrements the RX queue's
 * posted-buffer count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1371
1372/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001373static void be_rx_compl_discard(struct be_rx_obj *rxo,
1374 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375{
Sathya Perla3abcded2010-10-03 22:12:27 -07001376 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001378 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001380 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001381 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001382 put_page(page_info->page);
1383 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001384 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385 }
1386}
1387
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: small frames are copied entirely into the linear
 * area; larger frames get the Ethernet header copied and the payload
 * attached as page fragments, coalescing frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area;
		 * the rest of this frag becomes page frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved into the skb (or was dropped above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1464
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001465/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001466static void be_rx_compl_process(struct be_rx_obj *rxo,
1467 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001468{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001469 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001470 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001472
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001473 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001474 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001475 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001476 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001477 return;
1478 }
1479
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001480 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001482 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001483 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001484 else
1485 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001487 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001488 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001489 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001490 skb->rxhash = rxcp->rss_hash;
1491
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492
Jiri Pirko343e43c2011-08-25 02:50:51 +00001493 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001494 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001495
1496 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497}
1498
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * build a frag-only skb from napi_get_frags() and feed it to GRO.
 * HW has already validated the checksum on this path.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps in u16); the first iteration always takes
	 * the "i == 0" branch and bumps it to 0 before frags[j] is touched.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1555
/* Decode a v1 (BE3-native) HW RX completion entry into the driver's
 * chip-independent struct be_rx_compl_info.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vtm/vlan_tag fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587
/* Decode a v0 (legacy, non-BE3-native) HW RX completion entry into the
 * driver's chip-independent struct be_rx_compl_info.  Unlike the v1
 * parser, this also extracts ip_frag, which be_rx_compl_get() uses to
 * invalidate the L4 checksum for IP fragments.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vtm/vlan_tag fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1621
/* Return the next valid RX completion from rxo->cq, parsed into the
 * per-rxo scratch struct rxo->rxcp, or NULL when none is pending.
 * The returned struct is reused on every call, so the caller must
 * consume it before fetching the next completion.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the entry's payload only after the valid bit is observed */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the tag for the port's pvid unless the VLAN was
		 * explicitly configured on the interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1664
Eric Dumazet1829b082011-03-01 05:48:12 +00001665static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001667 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001668
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001669 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001670 gfp |= __GFP_COMP;
1671 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001672}
1673
1674/*
1675 * Allocate a page, split it to fragments of size rx_frag_size and post as
1676 * receive buffers to BE
1677 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001678static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001679{
Sathya Perla3abcded2010-10-03 22:12:27 -07001680 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001681 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001682 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683 struct page *pagep = NULL;
1684 struct be_eth_rx_d *rxd;
1685 u64 page_dmaaddr = 0, frag_dmaaddr;
1686 u32 posted, page_offset = 0;
1687
Sathya Perla3abcded2010-10-03 22:12:27 -07001688 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001689 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1690 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001691 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001693 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001694 break;
1695 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001696 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1697 0, adapter->big_page_size,
1698 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699 page_info->page_offset = 0;
1700 } else {
1701 get_page(pagep);
1702 page_info->page_offset = page_offset + rx_frag_size;
1703 }
1704 page_offset = page_info->page_offset;
1705 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001706 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001707 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1708
1709 rxd = queue_head_node(rxq);
1710 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1711 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001712
1713 /* Any space left in the current big page for another frag? */
1714 if ((page_offset + rx_frag_size + rx_frag_size) >
1715 adapter->big_page_size) {
1716 pagep = NULL;
1717 page_info->last_page_user = true;
1718 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001719
1720 prev_page_info = page_info;
1721 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001722 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001723 }
1724 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001725 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001726
1727 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001728 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001729 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001730 } else if (atomic_read(&rxq->used) == 0) {
1731 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001732 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734}
1735
Sathya Perla5fb379e2009-06-18 00:02:59 +00001736static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001737{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001738 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1739
1740 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1741 return NULL;
1742
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001743 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001744 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1745
1746 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1747
1748 queue_tail_inc(tx_cq);
1749 return txcp;
1750}
1751
/* Unmap and free the transmitted skb whose wrbs end at @last_index on
 * @txo's TX queue.  Returns the number of wrbs consumed (header wrb
 * included) so the caller can credit them back to the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is released together with the first
		 * frag wrb only (and only if there is linear data).
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1783
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001784/* Return the number of events in the event queue */
1785static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001786{
1787 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001788 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001789
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001790 do {
1791 eqe = queue_tail_node(&eqo->q);
1792 if (eqe->evt == 0)
1793 break;
1794
1795 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001796 eqe->evt = 0;
1797 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001798 queue_tail_inc(&eqo->q);
1799 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001800
1801 return num;
1802}
1803
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001804/* Leaves the EQ is disarmed state */
1805static void be_eq_clean(struct be_eq_obj *eqo)
1806{
1807 int num = events_get(eqo);
1808
1809 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1810}
1811
/* Drain @rxo's completion queue and free every RX buffer still posted.
 * Used on queue teardown; must not run concurrently with RX processing.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Bound the wait (~10ms) in case the flush compl
			 * never arrives, e.g. after a HW error.
			 */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 marks the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1862
/* Drain all TX queues on teardown: first reap completions that HW still
 * delivers (bounded wait), then forcibly unmap/free any skbs whose
 * completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the reaped compls and credit wrbs back */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's wrb span to find its last wrb */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1921
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001922static void be_evt_queues_destroy(struct be_adapter *adapter)
1923{
1924 struct be_eq_obj *eqo;
1925 int i;
1926
1927 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001928 if (eqo->q.created) {
1929 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001930 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05301931 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001932 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001933 be_queue_free(adapter, &eqo->q);
1934 }
1935}
1936
1937static int be_evt_queues_create(struct be_adapter *adapter)
1938{
1939 struct be_queue_info *eq;
1940 struct be_eq_obj *eqo;
1941 int i, rc;
1942
Sathya Perla92bf14a2013-08-27 16:57:32 +05301943 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1944 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001945
1946 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05301947 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1948 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001949 eqo->adapter = adapter;
1950 eqo->tx_budget = BE_TX_BUDGET;
1951 eqo->idx = i;
1952 eqo->max_eqd = BE_MAX_EQD;
1953 eqo->enable_aic = true;
1954
1955 eq = &eqo->q;
1956 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1957 sizeof(struct be_eq_entry));
1958 if (rc)
1959 return rc;
1960
Sathya Perlaf2f781a2013-08-27 16:57:30 +05301961 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001962 if (rc)
1963 return rc;
1964 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001965 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001966}
1967
Sathya Perla5fb379e2009-06-18 00:02:59 +00001968static void be_mcc_queues_destroy(struct be_adapter *adapter)
1969{
1970 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001971
Sathya Perla8788fdc2009-07-27 22:52:03 +00001972 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001973 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001974 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001975 be_queue_free(adapter, q);
1976
Sathya Perla8788fdc2009-07-27 22:52:03 +00001977 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001978 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001979 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001980 be_queue_free(adapter, q);
1981}
1982
1983/* Must be called only after TX qs are created as MCC shares TX EQ */
1984static int be_mcc_queues_create(struct be_adapter *adapter)
1985{
1986 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001987
Sathya Perla8788fdc2009-07-27 22:52:03 +00001988 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001989 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001990 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001991 goto err;
1992
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001993 /* Use the default EQ for MCC completions */
1994 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001995 goto mcc_cq_free;
1996
Sathya Perla8788fdc2009-07-27 22:52:03 +00001997 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001998 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1999 goto mcc_cq_destroy;
2000
Sathya Perla8788fdc2009-07-27 22:52:03 +00002001 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002002 goto mcc_q_free;
2003
2004 return 0;
2005
2006mcc_q_free:
2007 be_queue_free(adapter, q);
2008mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002009 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002010mcc_cq_free:
2011 be_queue_free(adapter, cq);
2012err:
2013 return -1;
2014}
2015
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002016static void be_tx_queues_destroy(struct be_adapter *adapter)
2017{
2018 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002019 struct be_tx_obj *txo;
2020 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021
Sathya Perla3c8def92011-06-12 20:01:58 +00002022 for_all_tx_queues(adapter, txo, i) {
2023 q = &txo->q;
2024 if (q->created)
2025 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2026 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002027
Sathya Perla3c8def92011-06-12 20:01:58 +00002028 q = &txo->cq;
2029 if (q->created)
2030 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2031 be_queue_free(adapter, q);
2032 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002033}
2034
Sathya Perla77071332013-08-27 16:57:34 +05302035static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002036{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002037 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002038 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302039 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002040
Sathya Perla92bf14a2013-08-27 16:57:32 +05302041 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002042
Sathya Perla3c8def92011-06-12 20:01:58 +00002043 for_all_tx_queues(adapter, txo, i) {
2044 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002045 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2046 sizeof(struct be_eth_tx_compl));
2047 if (status)
2048 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002049
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002050 /* If num_evt_qs is less than num_tx_qs, then more than
2051 * one txq share an eq
2052 */
2053 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2054 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2055 if (status)
2056 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002057
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002058 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2059 sizeof(struct be_eth_wrb));
2060 if (status)
2061 return status;
2062
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002063 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002064 if (status)
2065 return status;
2066 }
2067
Sathya Perlad3791422012-09-28 04:39:44 +00002068 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2069 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002070 return 0;
2071}
2072
2073static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002074{
2075 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002076 struct be_rx_obj *rxo;
2077 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002078
Sathya Perla3abcded2010-10-03 22:12:27 -07002079 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002080 q = &rxo->cq;
2081 if (q->created)
2082 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2083 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002084 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002085}
2086
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002087static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002088{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002089 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002090 struct be_rx_obj *rxo;
2091 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002092
Sathya Perla92bf14a2013-08-27 16:57:32 +05302093 /* We can create as many RSS rings as there are EQs. */
2094 adapter->num_rx_qs = adapter->num_evt_qs;
2095
2096 /* We'll use RSS only if atleast 2 RSS rings are supported.
2097 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002098 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302099 if (adapter->num_rx_qs > 1)
2100 adapter->num_rx_qs++;
2101
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002102 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002103 for_all_rx_queues(adapter, rxo, i) {
2104 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002105 cq = &rxo->cq;
2106 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2107 sizeof(struct be_eth_rx_compl));
2108 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002109 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002110
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002111 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2112 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002113 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002114 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002115 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002116
Sathya Perlad3791422012-09-28 04:39:44 +00002117 dev_info(&adapter->pdev->dev,
2118 "created %d RSS queue(s) and 1 default RX queue\n",
2119 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002120 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002121}
2122
/* Legacy INTx interrupt handler (shared line). All INTx processing is
 * funneled through EQ0 (see be_irq_register, which passes eq_obj[0]).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* a valid intr resets the spurious-intr streak */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2154
/* MSI-x interrupt handler: notify the EQ without re-arming it and
 * defer all event processing to NAPI context. The ordering here —
 * notify before napi_schedule — must be preserved.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2163
Sathya Perla2e588f82011-03-11 02:49:26 +00002164static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002165{
Somnath Koture38b1702013-05-29 22:55:56 +00002166 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002167}
2168
/* NAPI RX worker for one RX ring: consume up to @budget completions,
 * dispatch each frame to the GRO or regular receive path, then ack
 * the CQ and replenish RX buffers if the ring is running low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded/flush compls */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RX ring when it drops below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2218
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002219static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2220 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002222 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002224
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002225 for (work_done = 0; work_done < budget; work_done++) {
2226 txcp = be_tx_compl_get(&txo->cq);
2227 if (!txcp)
2228 break;
2229 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002230 AMAP_GET_BITS(struct amap_eth_tx_compl,
2231 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232 }
2233
2234 if (work_done) {
2235 be_cq_notify(adapter, txo->cq.id, true, work_done);
2236 atomic_sub(num_wrbs, &txo->q.used);
2237
2238 /* As Tx wrbs have been freed up, wake up netdev queue
2239 * if it was stopped due to lack of tx wrbs. */
2240 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2241 atomic_read(&txo->q.used) < txo->q.len / 2) {
2242 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002243 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002244
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002245 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2246 tx_stats(txo)->tx_compl += work_done;
2247 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2248 }
2249 return (work_done < budget); /* Done */
2250}
Sathya Perla3c8def92011-06-12 20:01:58 +00002251
/* NAPI poll handler for one EQ: services every TX and RX ring mapped
 * to this EQ, plus MCC completions if this is the MCC EQ. Events are
 * counted once up front and acked at the end; the EQ is re-armed only
 * when all work fit within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* unfinished TX forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* all done: ack events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2290
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002291void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002292{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002293 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2294 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002295 u32 i;
2296
Sathya Perlad23e9462012-12-17 19:38:51 +00002297 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002298 return;
2299
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002300 if (lancer_chip(adapter)) {
2301 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2302 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2303 sliport_err1 = ioread32(adapter->db +
2304 SLIPORT_ERROR1_OFFSET);
2305 sliport_err2 = ioread32(adapter->db +
2306 SLIPORT_ERROR2_OFFSET);
2307 }
2308 } else {
2309 pci_read_config_dword(adapter->pdev,
2310 PCICFG_UE_STATUS_LOW, &ue_lo);
2311 pci_read_config_dword(adapter->pdev,
2312 PCICFG_UE_STATUS_HIGH, &ue_hi);
2313 pci_read_config_dword(adapter->pdev,
2314 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2315 pci_read_config_dword(adapter->pdev,
2316 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002317
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002318 ue_lo = (ue_lo & ~ue_lo_mask);
2319 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002320 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002321
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002322 /* On certain platforms BE hardware can indicate spurious UEs.
2323 * Allow the h/w to stop working completely in case of a real UE.
2324 * Hence not setting the hw_error for UE detection.
2325 */
2326 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002327 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002328 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002329 "Error detected in the card\n");
2330 }
2331
2332 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2333 dev_err(&adapter->pdev->dev,
2334 "ERR: sliport status 0x%x\n", sliport_status);
2335 dev_err(&adapter->pdev->dev,
2336 "ERR: sliport error1 0x%x\n", sliport_err1);
2337 dev_err(&adapter->pdev->dev,
2338 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002339 }
2340
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002341 if (ue_lo) {
2342 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2343 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002344 dev_err(&adapter->pdev->dev,
2345 "UE: %s bit set\n", ue_status_low_desc[i]);
2346 }
2347 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002348
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002349 if (ue_hi) {
2350 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2351 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002352 dev_err(&adapter->pdev->dev,
2353 "UE: %s bit set\n", ue_status_hi_desc[i]);
2354 }
2355 }
2356
2357}
2358
Sathya Perla8d56ff12009-11-22 22:02:26 +00002359static void be_msix_disable(struct be_adapter *adapter)
2360{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002361 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002362 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002363 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302364 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002365 }
2366}
2367
/* Enable MSI-x vectors for NIC (and RoCE if supported).
 * Returns 0 on success or when falling back to INTx is acceptable
 * (PF); returns the pci_enable_msix() error for VFs, where INTx is
 * not supported and the probe must fail.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors actually
		 * available; retry with that smaller count
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* when RoCE gets vectors, it takes half of those granted */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2416
/* Return the Linux IRQ vector assigned to this EQ's MSI-x slot */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2422
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs already requested (walking backwards from
 * the last successful one), disables MSI-x, and returns the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: i currently indexes the EQ whose request_irq failed */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2446
2447static int be_irq_register(struct be_adapter *adapter)
2448{
2449 struct net_device *netdev = adapter->netdev;
2450 int status;
2451
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002452 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002453 status = be_msix_register(adapter);
2454 if (status == 0)
2455 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002456 /* INTx is not supported for VF */
2457 if (!be_physfn(adapter))
2458 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002459 }
2460
Sathya Perlae49cc342012-11-27 19:50:02 +00002461 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002462 netdev->irq = adapter->pdev->irq;
2463 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002464 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002465 if (status) {
2466 dev_err(&adapter->pdev->dev,
2467 "INTx request IRQ failed - err %d\n", status);
2468 return status;
2469 }
2470done:
2471 adapter->isr_registered = true;
2472 return 0;
2473}
2474
2475static void be_irq_unregister(struct be_adapter *adapter)
2476{
2477 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002478 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002479 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002480
2481 if (!adapter->isr_registered)
2482 return;
2483
2484 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002485 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002486 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002487 goto done;
2488 }
2489
2490 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002491 for_all_evt_queues(adapter, eqo, i)
2492 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002493
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002494done:
2495 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496}
2497
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002498static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002499{
2500 struct be_queue_info *q;
2501 struct be_rx_obj *rxo;
2502 int i;
2503
2504 for_all_rx_queues(adapter, rxo, i) {
2505 q = &rxo->q;
2506 if (q->created) {
2507 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002508 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002509 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002510 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002511 }
2512}
2513
/* netdev ->ndo_stop: quiesce the device.
 * Order matters: RoCE first, then NAPI, async MCC, TX drain, RX queue
 * teardown, per-EQ IRQ sync + drain, and finally IRQ unregistration.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* stop NAPI before tearing down the queues it services */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* make sure no handler is still running for this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2550
/* Allocate and create the RX work queues, program the RSS indirection
 * table and hash flags when multiple RX rings exist, and post the
 * initial set of RX buffers. Returns 0 or a negative error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the 128-entry indirection table by cycling through
		 * the RSS rings' rss_ids
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS hashing is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2607
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002608static int be_open(struct net_device *netdev)
2609{
2610 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002611 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002612 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002613 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002614 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002615 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002616
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002617 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002618 if (status)
2619 goto err;
2620
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002621 status = be_irq_register(adapter);
2622 if (status)
2623 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002624
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002625 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002626 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002627
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002628 for_all_tx_queues(adapter, txo, i)
2629 be_cq_notify(adapter, txo->cq.id, true, 0);
2630
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002631 be_async_mcc_enable(adapter);
2632
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002633 for_all_evt_queues(adapter, eqo, i) {
2634 napi_enable(&eqo->napi);
2635 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2636 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002637 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002638
Sathya Perla323ff712012-09-28 04:39:43 +00002639 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002640 if (!status)
2641 be_link_status_update(adapter, link_status);
2642
Sathya Perlafba87552013-05-08 02:05:50 +00002643 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002644 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002645 return 0;
2646err:
2647 be_close(adapter->netdev);
2648 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002649}
2650
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002651static int be_setup_wol(struct be_adapter *adapter, bool enable)
2652{
2653 struct be_dma_mem cmd;
2654 int status = 0;
2655 u8 mac[ETH_ALEN];
2656
2657 memset(mac, 0, ETH_ALEN);
2658
2659 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002660 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2661 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002662 if (cmd.va == NULL)
2663 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002664
2665 if (enable) {
2666 status = pci_write_config_dword(adapter->pdev,
2667 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2668 if (status) {
2669 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002670 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002671 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2672 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002673 return status;
2674 }
2675 status = be_cmd_enable_magic_wol(adapter,
2676 adapter->netdev->dev_addr, &cmd);
2677 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2678 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2679 } else {
2680 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2681 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2682 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2683 }
2684
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002685 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002686 return status;
2687}
2688
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002689/*
2690 * Generate a seed MAC address from the PF MAC Address using jhash.
2691 * MAC Address for VFs are assigned incrementally starting from the seed.
2692 * These addresses are programmed in the ASIC by the PF and the VF driver
2693 * queries for the MAC address during its probe.
2694 */
Sathya Perla4c876612013-02-03 20:30:11 +00002695static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002696{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002697 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002698 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002699 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002700 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002701
2702 be_vf_eth_addr_generate(adapter, mac);
2703
Sathya Perla11ac75e2011-12-13 00:58:50 +00002704 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302705 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002706 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002707 vf_cfg->if_handle,
2708 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302709 else
2710 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2711 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002712
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002713 if (status)
2714 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002715 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002716 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002717 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002718
2719 mac[5] += 1;
2720 }
2721 return status;
2722}
2723
Sathya Perla4c876612013-02-03 20:30:11 +00002724static int be_vfs_mac_query(struct be_adapter *adapter)
2725{
2726 int status, vf;
2727 u8 mac[ETH_ALEN];
2728 struct be_vf_cfg *vf_cfg;
Sathya Perla95046b92013-07-23 15:25:02 +05302729 bool active = false;
Sathya Perla4c876612013-02-03 20:30:11 +00002730
2731 for_all_vfs(adapter, vf_cfg, vf) {
2732 be_cmd_get_mac_from_list(adapter, mac, &active,
2733 &vf_cfg->pmac_id, 0);
2734
2735 status = be_cmd_mac_addr_query(adapter, mac, false,
2736 vf_cfg->if_handle, 0);
2737 if (status)
2738 return status;
2739 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2740 }
2741 return 0;
2742}
2743
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002744static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002745{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002746 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002747 u32 vf;
2748
Sathya Perla257a3fe2013-06-14 15:54:51 +05302749 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002750 dev_warn(&adapter->pdev->dev,
2751 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002752 goto done;
2753 }
2754
Sathya Perlab4c1df92013-05-08 02:05:47 +00002755 pci_disable_sriov(adapter->pdev);
2756
Sathya Perla11ac75e2011-12-13 00:58:50 +00002757 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05302758 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00002759 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2760 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302761 else
2762 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2763 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002764
Sathya Perla11ac75e2011-12-13 00:58:50 +00002765 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2766 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002767done:
2768 kfree(adapter->vf_cfg);
2769 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002770}
2771
Sathya Perla77071332013-08-27 16:57:34 +05302772static void be_clear_queues(struct be_adapter *adapter)
2773{
2774 be_mcc_queues_destroy(adapter);
2775 be_rx_cqs_destroy(adapter);
2776 be_tx_queues_destroy(adapter);
2777 be_evt_queues_destroy(adapter);
2778}
2779
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302780static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002781{
Sathya Perla191eb752012-02-23 18:50:13 +00002782 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2783 cancel_delayed_work_sync(&adapter->work);
2784 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2785 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302786}
2787
/* Undo be_setup(): stop the worker, clear SR-IOV, delete the primary
 * and additional unicast MACs, destroy the interface and all queues,
 * free the pmac-id table and disable MSI-X.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2813
Sathya Perla4c876612013-02-03 20:30:11 +00002814static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002815{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302816 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002817 struct be_vf_cfg *vf_cfg;
2818 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002819 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002820
Sathya Perla4c876612013-02-03 20:30:11 +00002821 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2822 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002823
Sathya Perla4c876612013-02-03 20:30:11 +00002824 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302825 if (!BE3_chip(adapter)) {
2826 status = be_cmd_get_profile_config(adapter, &res,
2827 vf + 1);
2828 if (!status)
2829 cap_flags = res.if_cap_flags;
2830 }
Sathya Perla4c876612013-02-03 20:30:11 +00002831
2832 /* If a FW profile exists, then cap_flags are updated */
2833 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2834 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2835 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2836 &vf_cfg->if_handle, vf + 1);
2837 if (status)
2838 goto err;
2839 }
2840err:
2841 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002842}
2843
Sathya Perla39f1d942012-05-08 19:41:24 +00002844static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002845{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002846 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002847 int vf;
2848
Sathya Perla39f1d942012-05-08 19:41:24 +00002849 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2850 GFP_KERNEL);
2851 if (!adapter->vf_cfg)
2852 return -ENOMEM;
2853
Sathya Perla11ac75e2011-12-13 00:58:50 +00002854 for_all_vfs(adapter, vf_cfg, vf) {
2855 vf_cfg->if_handle = -1;
2856 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002857 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002858 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002859}
2860
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002861static int be_vf_setup(struct be_adapter *adapter)
2862{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002863 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002864 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002865 int status, old_vfs, vf;
2866 struct device *dev = &adapter->pdev->dev;
Sathya Perla04a06022013-07-23 15:25:00 +05302867 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002868
Sathya Perla257a3fe2013-06-14 15:54:51 +05302869 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla4c876612013-02-03 20:30:11 +00002870 if (old_vfs) {
2871 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2872 if (old_vfs != num_vfs)
2873 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2874 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002875 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302876 if (num_vfs > be_max_vfs(adapter))
Sathya Perla4c876612013-02-03 20:30:11 +00002877 dev_info(dev, "Device supports %d VFs and not %d\n",
Sathya Perla92bf14a2013-08-27 16:57:32 +05302878 be_max_vfs(adapter), num_vfs);
2879 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
Sathya Perlab4c1df92013-05-08 02:05:47 +00002880 if (!adapter->num_vfs)
Sathya Perla4c876612013-02-03 20:30:11 +00002881 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00002882 }
2883
2884 status = be_vf_setup_init(adapter);
2885 if (status)
2886 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002887
Sathya Perla4c876612013-02-03 20:30:11 +00002888 if (old_vfs) {
2889 for_all_vfs(adapter, vf_cfg, vf) {
2890 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2891 if (status)
2892 goto err;
2893 }
2894 } else {
2895 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002896 if (status)
2897 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002898 }
2899
Sathya Perla4c876612013-02-03 20:30:11 +00002900 if (old_vfs) {
2901 status = be_vfs_mac_query(adapter);
2902 if (status)
2903 goto err;
2904 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002905 status = be_vf_eth_addr_config(adapter);
2906 if (status)
2907 goto err;
2908 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002909
Sathya Perla11ac75e2011-12-13 00:58:50 +00002910 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05302911 /* Allow VFs to programs MAC/VLAN filters */
2912 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2913 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2914 status = be_cmd_set_fn_privileges(adapter,
2915 privileges |
2916 BE_PRIV_FILTMGMT,
2917 vf + 1);
2918 if (!status)
2919 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2920 vf);
2921 }
2922
Sathya Perla4c876612013-02-03 20:30:11 +00002923 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2924 * Allow full available bandwidth
2925 */
2926 if (BE3_chip(adapter) && !old_vfs)
2927 be_cmd_set_qos(adapter, 1000, vf+1);
2928
2929 status = be_cmd_link_status_query(adapter, &lnk_speed,
2930 NULL, vf + 1);
2931 if (!status)
2932 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002933
2934 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05002935 vf + 1, vf_cfg->if_handle, NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002936 if (status)
2937 goto err;
2938 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002939
2940 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002941 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00002942
2943 if (!old_vfs) {
2944 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2945 if (status) {
2946 dev_err(dev, "SRIOV enable failed\n");
2947 adapter->num_vfs = 0;
2948 goto err;
2949 }
2950 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002951 return 0;
2952err:
Sathya Perla4c876612013-02-03 20:30:11 +00002953 dev_err(dev, "VF setup failed\n");
2954 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002955 return status;
2956}
2957
Sathya Perla92bf14a2013-08-27 16:57:32 +05302958/* On BE2/BE3 FW does not suggest the supported limits */
2959static void BEx_get_resources(struct be_adapter *adapter,
2960 struct be_resources *res)
2961{
2962 struct pci_dev *pdev = adapter->pdev;
2963 bool use_sriov = false;
2964
2965 if (BE3_chip(adapter) && be_physfn(adapter)) {
2966 int max_vfs;
2967
2968 max_vfs = pci_sriov_get_totalvfs(pdev);
2969 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2970 use_sriov = res->max_vfs && num_vfs;
2971 }
2972
2973 if (be_physfn(adapter))
2974 res->max_uc_mac = BE_UC_PMAC_COUNT;
2975 else
2976 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2977
2978 if (adapter->function_mode & FLEX10_MODE)
2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde1aa96732013-09-27 15:18:16 -05002980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302982 else
2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2984 res->max_mcast_mac = BE_MAX_MC;
2985
2986 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2987 !be_physfn(adapter))
2988 res->max_tx_qs = 1;
2989 else
2990 res->max_tx_qs = BE3_MAX_TX_QS;
2991
2992 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2993 !use_sriov && be_physfn(adapter))
2994 res->max_rss_qs = (adapter->be3_native) ?
2995 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2996 res->max_rx_qs = res->max_rss_qs + 1;
2997
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302998 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302999
3000 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3001 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3002 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3003}
3004
Sathya Perla30128032011-11-10 19:17:57 +00003005static void be_setup_init(struct be_adapter *adapter)
3006{
3007 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003008 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003009 adapter->if_handle = -1;
3010 adapter->be3_native = false;
3011 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003012 if (be_physfn(adapter))
3013 adapter->cmd_privileges = MAX_PRIVILEGES;
3014 else
3015 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003016}
3017
Sathya Perla92bf14a2013-08-27 16:57:32 +05303018static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003019{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303020 struct device *dev = &adapter->pdev->dev;
3021 struct be_resources res = {0};
3022 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003023
Sathya Perla92bf14a2013-08-27 16:57:32 +05303024 if (BEx_chip(adapter)) {
3025 BEx_get_resources(adapter, &res);
3026 adapter->res = res;
3027 }
3028
3029 /* For BE3 only check if FW suggests a different max-txqs value */
3030 if (BE3_chip(adapter)) {
3031 status = be_cmd_get_profile_config(adapter, &res, 0);
3032 if (!status && res.max_tx_qs)
3033 adapter->res.max_tx_qs =
3034 min(adapter->res.max_tx_qs, res.max_tx_qs);
3035 }
3036
3037 /* For Lancer, SH etc read per-function resource limits from FW.
3038 * GET_FUNC_CONFIG returns per function guaranteed limits.
3039 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3040 */
Sathya Perla4c876612013-02-03 20:30:11 +00003041 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303042 status = be_cmd_get_func_config(adapter, &res);
3043 if (status)
3044 return status;
3045
3046 /* If RoCE may be enabled stash away half the EQs for RoCE */
3047 if (be_roce_supported(adapter))
3048 res.max_evt_qs /= 2;
3049 adapter->res = res;
3050
3051 if (be_physfn(adapter)) {
3052 status = be_cmd_get_profile_config(adapter, &res, 0);
3053 if (status)
3054 return status;
3055 adapter->res.max_vfs = res.max_vfs;
3056 }
3057
3058 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3059 be_max_txqs(adapter), be_max_rxqs(adapter),
3060 be_max_rss(adapter), be_max_eqs(adapter),
3061 be_max_vfs(adapter));
3062 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3063 be_max_uc(adapter), be_max_mc(adapter),
3064 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003065 }
3066
Sathya Perla92bf14a2013-08-27 16:57:32 +05303067 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003068}
3069
Sathya Perla39f1d942012-05-08 19:41:24 +00003070/* Routine to query per function resource limits */
3071static int be_get_config(struct be_adapter *adapter)
3072{
Sathya Perla4c876612013-02-03 20:30:11 +00003073 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003074
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003075 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3076 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003077 &adapter->function_caps,
3078 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003079 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303080 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003081
Sathya Perla92bf14a2013-08-27 16:57:32 +05303082 status = be_get_resources(adapter);
3083 if (status)
3084 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003085
3086 /* primary mac needs 1 pmac entry */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303087 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3088 GFP_KERNEL);
3089 if (!adapter->pmac_id)
3090 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003091
Sathya Perla92bf14a2013-08-27 16:57:32 +05303092 /* Sanitize cfg_num_qs based on HW and platform limits */
3093 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3094
3095 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003096}
3097
Sathya Perla95046b92013-07-23 15:25:02 +05303098static int be_mac_setup(struct be_adapter *adapter)
3099{
3100 u8 mac[ETH_ALEN];
3101 int status;
3102
3103 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3104 status = be_cmd_get_perm_mac(adapter, mac);
3105 if (status)
3106 return status;
3107
3108 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3109 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3110 } else {
3111 /* Maybe the HW was reset; dev_addr must be re-programmed */
3112 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3113 }
3114
3115 /* On BE3 VFs this cmd may fail due to lack of privilege.
3116 * Ignore the failure as in this case pmac_id is fetched
3117 * in the IFACE_CREATE cmd.
3118 */
3119 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3120 &adapter->pmac_id[0], 0);
3121 return 0;
3122}
3123
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303124static void be_schedule_worker(struct be_adapter *adapter)
3125{
3126 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3127 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3128}
3129
Sathya Perla77071332013-08-27 16:57:34 +05303130static int be_setup_queues(struct be_adapter *adapter)
3131{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303132 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303133 int status;
3134
3135 status = be_evt_queues_create(adapter);
3136 if (status)
3137 goto err;
3138
3139 status = be_tx_qs_create(adapter);
3140 if (status)
3141 goto err;
3142
3143 status = be_rx_cqs_create(adapter);
3144 if (status)
3145 goto err;
3146
3147 status = be_mcc_queues_create(adapter);
3148 if (status)
3149 goto err;
3150
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303151 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3152 if (status)
3153 goto err;
3154
3155 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3156 if (status)
3157 goto err;
3158
Sathya Perla77071332013-08-27 16:57:34 +05303159 return 0;
3160err:
3161 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3162 return status;
3163}
3164
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303165int be_update_queues(struct be_adapter *adapter)
3166{
3167 struct net_device *netdev = adapter->netdev;
3168 int status;
3169
3170 if (netif_running(netdev))
3171 be_close(netdev);
3172
3173 be_cancel_worker(adapter);
3174
3175 /* If any vectors have been shared with RoCE we cannot re-program
3176 * the MSIx table.
3177 */
3178 if (!adapter->num_msix_roce_vec)
3179 be_msix_disable(adapter);
3180
3181 be_clear_queues(adapter);
3182
3183 if (!msix_enabled(adapter)) {
3184 status = be_msix_enable(adapter);
3185 if (status)
3186 return status;
3187 }
3188
3189 status = be_setup_queues(adapter);
3190 if (status)
3191 return status;
3192
3193 be_schedule_worker(adapter);
3194
3195 if (netif_running(netdev))
3196 status = be_open(netdev);
3197
3198 return status;
3199}
3200
Sathya Perla5fb379e2009-06-18 00:02:59 +00003201static int be_setup(struct be_adapter *adapter)
3202{
Sathya Perla39f1d942012-05-08 19:41:24 +00003203 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303204 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003205 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003206
Sathya Perla30128032011-11-10 19:17:57 +00003207 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003208
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003209 if (!lancer_chip(adapter))
3210 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003211
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003212 status = be_get_config(adapter);
3213 if (status)
3214 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003215
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003216 status = be_msix_enable(adapter);
3217 if (status)
3218 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003219
Sathya Perla77071332013-08-27 16:57:34 +05303220 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3221 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3222 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3223 en_flags |= BE_IF_FLAGS_RSS;
3224 en_flags = en_flags & be_if_cap_flags(adapter);
3225 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3226 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003227 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003228 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003229
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303230 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3231 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303232 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303233 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003234 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003235 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003236
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003237 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3238 /* In UMC mode FW does not return right privileges.
3239 * Override with correct privilege equivalent to PF.
3240 */
3241 if (be_is_mc(adapter))
3242 adapter->cmd_privileges = MAX_PRIVILEGES;
3243
Sathya Perla95046b92013-07-23 15:25:02 +05303244 status = be_mac_setup(adapter);
3245 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003246 goto err;
3247
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003248 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003249
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003250 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003251 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003252
3253 be_set_rx_mode(adapter->netdev);
3254
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003255 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003256
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003257 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3258 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003259 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003260
Sathya Perla92bf14a2013-08-27 16:57:32 +05303261 if (be_physfn(adapter) && num_vfs) {
3262 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003263 be_vf_setup(adapter);
3264 else
3265 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003266 }
3267
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003268 status = be_cmd_get_phy_info(adapter);
3269 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003270 adapter->phy.fc_autoneg = 1;
3271
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303272 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003273 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003274err:
3275 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003276 return status;
3277}
3278
Ivan Vecera66268732011-12-08 01:31:21 +00003279#ifdef CONFIG_NET_POLL_CONTROLLER
3280static void be_netpoll(struct net_device *netdev)
3281{
3282 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003283 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003284 int i;
3285
Sathya Perlae49cc342012-11-27 19:50:02 +00003286 for_all_evt_queues(adapter, eqo, i) {
3287 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3288 napi_schedule(&eqo->napi);
3289 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003290
3291 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003292}
3293#endif
3294
Ajit Khaparde84517482009-09-04 03:12:16 +00003295#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003296static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003297
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003298static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003299 const u8 *p, u32 img_start, int image_size,
3300 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003301{
3302 u32 crc_offset;
3303 u8 flashed_crc[4];
3304 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003305
3306 crc_offset = hdr_size + img_start + image_size - 4;
3307
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003308 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003309
3310 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003311 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003312 if (status) {
3313 dev_err(&adapter->pdev->dev,
3314 "could not get crc from flash, not flashing redboot\n");
3315 return false;
3316 }
3317
3318 /*update redboot only if crc does not match*/
3319 if (!memcmp(flashed_crc, p, 4))
3320 return false;
3321 else
3322 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003323}
3324
Sathya Perla306f1342011-08-02 19:57:45 +00003325static bool phy_flashing_required(struct be_adapter *adapter)
3326{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003327 return (adapter->phy.phy_type == TN_8022 &&
3328 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003329}
3330
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003331static bool is_comp_in_ufi(struct be_adapter *adapter,
3332 struct flash_section_info *fsec, int type)
3333{
3334 int i = 0, img_type = 0;
3335 struct flash_section_info_g2 *fsec_g2 = NULL;
3336
Sathya Perlaca34fe32012-11-06 17:48:56 +00003337 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003338 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3339
3340 for (i = 0; i < MAX_FLASH_COMP; i++) {
3341 if (fsec_g2)
3342 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3343 else
3344 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3345
3346 if (img_type == type)
3347 return true;
3348 }
3349 return false;
3350
3351}
3352
Jingoo Han4188e7d2013-08-05 18:02:02 +09003353static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003354 int header_size,
3355 const struct firmware *fw)
3356{
3357 struct flash_section_info *fsec = NULL;
3358 const u8 *p = fw->data;
3359
3360 p += header_size;
3361 while (p < (fw->data + fw->size)) {
3362 fsec = (struct flash_section_info *)p;
3363 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3364 return fsec;
3365 p += 32;
3366 }
3367 return NULL;
3368}
3369
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003370static int be_flash(struct be_adapter *adapter, const u8 *img,
3371 struct be_dma_mem *flash_cmd, int optype, int img_size)
3372{
3373 u32 total_bytes = 0, flash_op, num_bytes = 0;
3374 int status = 0;
3375 struct be_cmd_write_flashrom *req = flash_cmd->va;
3376
3377 total_bytes = img_size;
3378 while (total_bytes) {
3379 num_bytes = min_t(u32, 32*1024, total_bytes);
3380
3381 total_bytes -= num_bytes;
3382
3383 if (!total_bytes) {
3384 if (optype == OPTYPE_PHY_FW)
3385 flash_op = FLASHROM_OPER_PHY_FLASH;
3386 else
3387 flash_op = FLASHROM_OPER_FLASH;
3388 } else {
3389 if (optype == OPTYPE_PHY_FW)
3390 flash_op = FLASHROM_OPER_PHY_SAVE;
3391 else
3392 flash_op = FLASHROM_OPER_SAVE;
3393 }
3394
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003395 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003396 img += num_bytes;
3397 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3398 flash_op, num_bytes);
3399 if (status) {
3400 if (status == ILLEGAL_IOCTL_REQ &&
3401 optype == OPTYPE_PHY_FW)
3402 break;
3403 dev_err(&adapter->pdev->dev,
3404 "cmd to write to flash rom failed.\n");
3405 return status;
3406 }
3407 }
3408 return 0;
3409}
3410
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003411/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003412static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003413 const struct firmware *fw,
3414 struct be_dma_mem *flash_cmd,
3415 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003416
Ajit Khaparde84517482009-09-04 03:12:16 +00003417{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003418 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003419 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003420 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003421 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003422 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003423 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003424
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003425 struct flash_comp gen3_flash_types[] = {
3426 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3427 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3428 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3429 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3430 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3431 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3432 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3433 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3434 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3435 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3436 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3437 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3438 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3439 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3440 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3441 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3442 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3443 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3444 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3445 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003446 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003447
3448 struct flash_comp gen2_flash_types[] = {
3449 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3450 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3451 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3452 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3453 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3454 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3455 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3456 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3457 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3458 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3459 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3460 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3461 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3462 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3463 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3464 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003465 };
3466
Sathya Perlaca34fe32012-11-06 17:48:56 +00003467 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003468 pflashcomp = gen3_flash_types;
3469 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003470 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003471 } else {
3472 pflashcomp = gen2_flash_types;
3473 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003474 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003475 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003476
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003477 /* Get flash section info*/
3478 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3479 if (!fsec) {
3480 dev_err(&adapter->pdev->dev,
3481 "Invalid Cookie. UFI corrupted ?\n");
3482 return -1;
3483 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003484 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003485 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003486 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003487
3488 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3489 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3490 continue;
3491
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003492 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3493 !phy_flashing_required(adapter))
3494 continue;
3495
3496 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3497 redboot = be_flash_redboot(adapter, fw->data,
3498 pflashcomp[i].offset, pflashcomp[i].size,
3499 filehdr_size + img_hdrs_size);
3500 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003501 continue;
3502 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003503
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003504 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003505 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003506 if (p + pflashcomp[i].size > fw->data + fw->size)
3507 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003508
3509 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3510 pflashcomp[i].size);
3511 if (status) {
3512 dev_err(&adapter->pdev->dev,
3513 "Flashing section type %d failed.\n",
3514 pflashcomp[i].img_type);
3515 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003516 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003517 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003518 return 0;
3519}
3520
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003521static int be_flash_skyhawk(struct be_adapter *adapter,
3522 const struct firmware *fw,
3523 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003524{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003525 int status = 0, i, filehdr_size = 0;
3526 int img_offset, img_size, img_optype, redboot;
3527 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3528 const u8 *p = fw->data;
3529 struct flash_section_info *fsec = NULL;
3530
3531 filehdr_size = sizeof(struct flash_file_hdr_g3);
3532 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3533 if (!fsec) {
3534 dev_err(&adapter->pdev->dev,
3535 "Invalid Cookie. UFI corrupted ?\n");
3536 return -1;
3537 }
3538
3539 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3540 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3541 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3542
3543 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3544 case IMAGE_FIRMWARE_iSCSI:
3545 img_optype = OPTYPE_ISCSI_ACTIVE;
3546 break;
3547 case IMAGE_BOOT_CODE:
3548 img_optype = OPTYPE_REDBOOT;
3549 break;
3550 case IMAGE_OPTION_ROM_ISCSI:
3551 img_optype = OPTYPE_BIOS;
3552 break;
3553 case IMAGE_OPTION_ROM_PXE:
3554 img_optype = OPTYPE_PXE_BIOS;
3555 break;
3556 case IMAGE_OPTION_ROM_FCoE:
3557 img_optype = OPTYPE_FCOE_BIOS;
3558 break;
3559 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3560 img_optype = OPTYPE_ISCSI_BACKUP;
3561 break;
3562 case IMAGE_NCSI:
3563 img_optype = OPTYPE_NCSI_FW;
3564 break;
3565 default:
3566 continue;
3567 }
3568
3569 if (img_optype == OPTYPE_REDBOOT) {
3570 redboot = be_flash_redboot(adapter, fw->data,
3571 img_offset, img_size,
3572 filehdr_size + img_hdrs_size);
3573 if (!redboot)
3574 continue;
3575 }
3576
3577 p = fw->data;
3578 p += filehdr_size + img_offset + img_hdrs_size;
3579 if (p + img_size > fw->data + fw->size)
3580 return -1;
3581
3582 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3583 if (status) {
3584 dev_err(&adapter->pdev->dev,
3585 "Flashing section type %d failed.\n",
3586 fsec->fsec_entry[i].type);
3587 return status;
3588 }
3589 }
3590 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003591}
3592
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003593static int lancer_fw_download(struct be_adapter *adapter,
3594 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003595{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003596#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3597#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3598 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003599 const u8 *data_ptr = NULL;
3600 u8 *dest_image_ptr = NULL;
3601 size_t image_size = 0;
3602 u32 chunk_size = 0;
3603 u32 data_written = 0;
3604 u32 offset = 0;
3605 int status = 0;
3606 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003607 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003608
3609 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3610 dev_err(&adapter->pdev->dev,
3611 "FW Image not properly aligned. "
3612 "Length must be 4 byte aligned.\n");
3613 status = -EINVAL;
3614 goto lancer_fw_exit;
3615 }
3616
3617 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3618 + LANCER_FW_DOWNLOAD_CHUNK;
3619 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003620 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003621 if (!flash_cmd.va) {
3622 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003623 goto lancer_fw_exit;
3624 }
3625
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003626 dest_image_ptr = flash_cmd.va +
3627 sizeof(struct lancer_cmd_req_write_object);
3628 image_size = fw->size;
3629 data_ptr = fw->data;
3630
3631 while (image_size) {
3632 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3633
3634 /* Copy the image chunk content. */
3635 memcpy(dest_image_ptr, data_ptr, chunk_size);
3636
3637 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003638 chunk_size, offset,
3639 LANCER_FW_DOWNLOAD_LOCATION,
3640 &data_written, &change_status,
3641 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003642 if (status)
3643 break;
3644
3645 offset += data_written;
3646 data_ptr += data_written;
3647 image_size -= data_written;
3648 }
3649
3650 if (!status) {
3651 /* Commit the FW written */
3652 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003653 0, offset,
3654 LANCER_FW_DOWNLOAD_LOCATION,
3655 &data_written, &change_status,
3656 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003657 }
3658
3659 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3660 flash_cmd.dma);
3661 if (status) {
3662 dev_err(&adapter->pdev->dev,
3663 "Firmware load error. "
3664 "Status code: 0x%x Additional Status: 0x%x\n",
3665 status, add_status);
3666 goto lancer_fw_exit;
3667 }
3668
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003669 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur5c510812013-05-30 02:52:23 +00003670 status = lancer_physdev_ctrl(adapter,
3671 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003672 if (status) {
3673 dev_err(&adapter->pdev->dev,
3674 "Adapter busy for FW reset.\n"
3675 "New FW will not be active.\n");
3676 goto lancer_fw_exit;
3677 }
3678 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3679 dev_err(&adapter->pdev->dev,
3680 "System reboot required for new FW"
3681 " to be active\n");
3682 }
3683
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003684 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3685lancer_fw_exit:
3686 return status;
3687}
3688
Sathya Perlaca34fe32012-11-06 17:48:56 +00003689#define UFI_TYPE2 2
3690#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003691#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003692#define UFI_TYPE4 4
3693static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003694 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003695{
3696 if (fhdr == NULL)
3697 goto be_get_ufi_exit;
3698
Sathya Perlaca34fe32012-11-06 17:48:56 +00003699 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3700 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003701 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3702 if (fhdr->asic_type_rev == 0x10)
3703 return UFI_TYPE3R;
3704 else
3705 return UFI_TYPE3;
3706 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003707 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003708
3709be_get_ufi_exit:
3710 dev_err(&adapter->pdev->dev,
3711 "UFI and Interface are not compatible for flashing\n");
3712 return -1;
3713}
3714
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003715static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3716{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003717 struct flash_file_hdr_g3 *fhdr3;
3718 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003719 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003720 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003721 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003722
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003723 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003724 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3725 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003726 if (!flash_cmd.va) {
3727 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003728 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003729 }
3730
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003731 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003732 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003733
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003734 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003735
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003736 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3737 for (i = 0; i < num_imgs; i++) {
3738 img_hdr_ptr = (struct image_hdr *)(fw->data +
3739 (sizeof(struct flash_file_hdr_g3) +
3740 i * sizeof(struct image_hdr)));
3741 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003742 switch (ufi_type) {
3743 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003744 status = be_flash_skyhawk(adapter, fw,
3745 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003746 break;
3747 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003748 status = be_flash_BEx(adapter, fw, &flash_cmd,
3749 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003750 break;
3751 case UFI_TYPE3:
3752 /* Do not flash this ufi on BE3-R cards */
3753 if (adapter->asic_rev < 0x10)
3754 status = be_flash_BEx(adapter, fw,
3755 &flash_cmd,
3756 num_imgs);
3757 else {
3758 status = -1;
3759 dev_err(&adapter->pdev->dev,
3760 "Can't load BE3 UFI on BE3R\n");
3761 }
3762 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003763 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003764 }
3765
Sathya Perlaca34fe32012-11-06 17:48:56 +00003766 if (ufi_type == UFI_TYPE2)
3767 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003768 else if (ufi_type == -1)
3769 status = -1;
3770
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003771 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3772 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003773 if (status) {
3774 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003775 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003776 }
3777
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003778 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003779
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003780be_fw_exit:
3781 return status;
3782}
3783
3784int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3785{
3786 const struct firmware *fw;
3787 int status;
3788
3789 if (!netif_running(adapter->netdev)) {
3790 dev_err(&adapter->pdev->dev,
3791 "Firmware load not allowed (interface is down)\n");
3792 return -1;
3793 }
3794
3795 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3796 if (status)
3797 goto fw_exit;
3798
3799 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3800
3801 if (lancer_chip(adapter))
3802 status = lancer_fw_download(adapter, fw);
3803 else
3804 status = be_fw_download(adapter, fw);
3805
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003806 if (!status)
3807 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3808 adapter->fw_on_flash);
3809
Ajit Khaparde84517482009-09-04 03:12:16 +00003810fw_exit:
3811 release_firmware(fw);
3812 return status;
3813}
3814
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003815static int be_ndo_bridge_setlink(struct net_device *dev,
3816 struct nlmsghdr *nlh)
3817{
3818 struct be_adapter *adapter = netdev_priv(dev);
3819 struct nlattr *attr, *br_spec;
3820 int rem;
3821 int status = 0;
3822 u16 mode = 0;
3823
3824 if (!sriov_enabled(adapter))
3825 return -EOPNOTSUPP;
3826
3827 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3828
3829 nla_for_each_nested(attr, br_spec, rem) {
3830 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3831 continue;
3832
3833 mode = nla_get_u16(attr);
3834 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3835 return -EINVAL;
3836
3837 status = be_cmd_set_hsw_config(adapter, 0, 0,
3838 adapter->if_handle,
3839 mode == BRIDGE_MODE_VEPA ?
3840 PORT_FWD_TYPE_VEPA :
3841 PORT_FWD_TYPE_VEB);
3842 if (status)
3843 goto err;
3844
3845 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3846 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3847
3848 return status;
3849 }
3850err:
3851 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3852 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3853
3854 return status;
3855}
3856
3857static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3858 struct net_device *dev,
3859 u32 filter_mask)
3860{
3861 struct be_adapter *adapter = netdev_priv(dev);
3862 int status = 0;
3863 u8 hsw_mode;
3864
3865 if (!sriov_enabled(adapter))
3866 return 0;
3867
3868 /* BE and Lancer chips support VEB mode only */
3869 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3870 hsw_mode = PORT_FWD_TYPE_VEB;
3871 } else {
3872 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3873 adapter->if_handle, &hsw_mode);
3874 if (status)
3875 return 0;
3876 }
3877
3878 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3879 hsw_mode == PORT_FWD_TYPE_VEPA ?
3880 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3881}
3882
stephen hemmingere5686ad2012-01-05 19:10:25 +00003883static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003884 .ndo_open = be_open,
3885 .ndo_stop = be_close,
3886 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003887 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003888 .ndo_set_mac_address = be_mac_addr_set,
3889 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003890 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003891 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003892 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3893 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003894 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003895 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003896 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003897 .ndo_get_vf_config = be_get_vf_config,
3898#ifdef CONFIG_NET_POLL_CONTROLLER
3899 .ndo_poll_controller = be_netpoll,
3900#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003901 .ndo_bridge_setlink = be_ndo_bridge_setlink,
3902 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003903};
3904
/* Initialize feature flags, netdev ops and ethtool ops on a freshly
 * allocated be2net net_device.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* features the HW supports and the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* features enabled by default (VLAN rx/filter can't be toggled) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses without resorting to promisc */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
3931
3932static void be_unmap_pci_bars(struct be_adapter *adapter)
3933{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003934 if (adapter->csr)
3935 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003936 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003937 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003938}
3939
Sathya Perlace66f782012-11-06 17:48:58 +00003940static int db_bar(struct be_adapter *adapter)
3941{
3942 if (lancer_chip(adapter) || !be_physfn(adapter))
3943 return 0;
3944 else
3945 return 4;
3946}
3947
3948static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003949{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003950 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003951 adapter->roce_db.size = 4096;
3952 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3953 db_bar(adapter));
3954 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3955 db_bar(adapter));
3956 }
Parav Pandit045508a2012-03-26 14:27:13 +00003957 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003958}
3959
/* Map the PCI BARs used by the driver: the CSR window (BE2/BE3 physical
 * functions only), the doorbell BAR, and on Skyhawk the RoCE doorbell
 * region.  Also derives adapter->if_type from the SLI interface register.
 * Returns 0 on success, -ENOMEM when a mapping fails (already-mapped
 * BARs are unwound).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* only BE2/BE3 physical functions expose the CSR window (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3987
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003988static void be_ctrl_cleanup(struct be_adapter *adapter)
3989{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003990 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003991
3992 be_unmap_pci_bars(adapter);
3993
3994 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003995 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3996 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003997
Sathya Perla5b8821b2011-08-02 19:57:44 +00003998 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003999 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004000 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4001 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004002}
4003
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004004static int be_ctrl_init(struct be_adapter *adapter)
4005{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004006 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4007 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004008 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004009 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004010 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004011
Sathya Perlace66f782012-11-06 17:48:58 +00004012 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4013 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4014 SLI_INTF_FAMILY_SHIFT;
4015 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4016
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004017 status = be_map_pci_bars(adapter);
4018 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004019 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004020
4021 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004022 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4023 mbox_mem_alloc->size,
4024 &mbox_mem_alloc->dma,
4025 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004026 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004027 status = -ENOMEM;
4028 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004029 }
4030 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4031 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4032 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4033 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004034
Sathya Perla5b8821b2011-08-02 19:57:44 +00004035 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa82013-08-26 22:45:23 -07004036 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4037 rx_filter->size, &rx_filter->dma,
4038 GFP_KERNEL);
Sathya Perla5b8821b2011-08-02 19:57:44 +00004039 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004040 status = -ENOMEM;
4041 goto free_mbox;
4042 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004043
Ivan Vecera29849612010-12-14 05:43:19 +00004044 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004045 spin_lock_init(&adapter->mcc_lock);
4046 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004047
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07004048 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004049 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004050 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004051
4052free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004053 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4054 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004055
4056unmap_pci_bars:
4057 be_unmap_pci_bars(adapter);
4058
4059done:
4060 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004061}
4062
4063static void be_stats_cleanup(struct be_adapter *adapter)
4064{
Sathya Perla3abcded2010-10-03 22:12:27 -07004065 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004066
4067 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004068 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4069 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004070}
4071
4072static int be_stats_init(struct be_adapter *adapter)
4073{
Sathya Perla3abcded2010-10-03 22:12:27 -07004074 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004075
Sathya Perlaca34fe32012-11-06 17:48:56 +00004076 if (lancer_chip(adapter))
4077 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4078 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004079 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004080 else
4081 /* BE3 and Skyhawk */
4082 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4083
Joe Perchesede23fa82013-08-26 22:45:23 -07004084 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4085 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004086 if (cmd->va == NULL)
4087 return -1;
4088 return 0;
4089}
4090
Bill Pemberton3bc6b062012-12-03 09:23:09 -05004091static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004092{
4093 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004094
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004095 if (!adapter)
4096 return;
4097
Parav Pandit045508a2012-03-26 14:27:13 +00004098 be_roce_dev_remove(adapter);
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004099 be_intr_set(adapter, false);
Parav Pandit045508a2012-03-26 14:27:13 +00004100
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004101 cancel_delayed_work_sync(&adapter->func_recovery_work);
4102
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004103 unregister_netdev(adapter->netdev);
4104
Sathya Perla5fb379e2009-06-18 00:02:59 +00004105 be_clear(adapter);
4106
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004107 /* tell fw we're done with firing cmds */
4108 be_cmd_fw_clean(adapter);
4109
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004110 be_stats_cleanup(adapter);
4111
4112 be_ctrl_cleanup(adapter);
4113
Sathya Perlad6b6d982012-09-05 01:56:48 +00004114 pci_disable_pcie_error_reporting(pdev);
4115
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004116 pci_set_drvdata(pdev, NULL);
4117 pci_release_regions(pdev);
4118 pci_disable_device(pdev);
4119
4120 free_netdev(adapter->netdev);
4121}
4122
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004123bool be_is_wol_supported(struct be_adapter *adapter)
4124{
4125 return ((adapter->wol_cap & BE_WOL_CAP) &&
4126 !be_is_wol_excluded(adapter)) ? true : false;
4127}
4128
Somnath Kotur941a77d2012-05-17 22:59:03 +00004129u32 be_get_fw_log_level(struct be_adapter *adapter)
4130{
4131 struct be_dma_mem extfat_cmd;
4132 struct be_fat_conf_params *cfgs;
4133 int status;
4134 u32 level = 0;
4135 int j;
4136
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004137 if (lancer_chip(adapter))
4138 return 0;
4139
Somnath Kotur941a77d2012-05-17 22:59:03 +00004140 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4141 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4142 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4143 &extfat_cmd.dma);
4144
4145 if (!extfat_cmd.va) {
4146 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4147 __func__);
4148 goto err;
4149 }
4150
4151 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4152 if (!status) {
4153 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4154 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00004155 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00004156 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4157 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4158 }
4159 }
4160 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4161 extfat_cmd.dma);
4162err:
4163 return level;
4164}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004165
Sathya Perla39f1d942012-05-08 19:41:24 +00004166static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004167{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004168 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00004169 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004170
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004171 status = be_cmd_get_cntl_attributes(adapter);
4172 if (status)
4173 return status;
4174
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004175 status = be_cmd_get_acpi_wol_cap(adapter);
4176 if (status) {
4177 /* in case of a failure to get wol capabillities
4178 * check the exclusion list to determine WOL capability */
4179 if (!be_is_wol_excluded(adapter))
4180 adapter->wol_cap |= BE_WOL_CAP;
4181 }
4182
4183 if (be_is_wol_supported(adapter))
4184 adapter->wol = true;
4185
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004186 /* Must be a power of 2 or else MODULO will BUG_ON */
4187 adapter->be_get_temp_freq = 64;
4188
Somnath Kotur941a77d2012-05-17 22:59:03 +00004189 level = be_get_fw_log_level(adapter);
4190 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4191
Sathya Perla92bf14a2013-08-27 16:57:32 +05304192 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004193 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004194}
4195
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004196static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004197{
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004198 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004199 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004200
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004201 status = lancer_test_and_set_rdy_state(adapter);
4202 if (status)
4203 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004204
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004205 if (netif_running(adapter->netdev))
4206 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004207
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004208 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004209
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004210 be_clear_all_error(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004211
4212 status = be_setup(adapter);
4213 if (status)
4214 goto err;
4215
4216 if (netif_running(adapter->netdev)) {
4217 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004218 if (status)
4219 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004220 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004221
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004222 dev_err(dev, "Error recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004223 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004224err:
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004225 if (status == -EAGAIN)
4226 dev_err(dev, "Waiting for resource provisioning\n");
4227 else
4228 dev_err(dev, "Error recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004229
4230 return status;
4231}
4232
/* Periodic (1s) worker that polls for HW errors and, on Lancer chips,
 * drives recovery via lancer_recover_func(). Reschedules itself unless
 * recovery failed with an error other than -EAGAIN.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device
		 * while we rebuild it */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4259
/* Periodic (1s) housekeeping worker: reaps MCC completions, kicks off
 * stats/temperature queries, replenishes starved RX rings and updates
 * EQ delay settings. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new async stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF-only: poll die temperature every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* repost RX buffers on rings that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4303
Sathya Perla257a3fe2013-06-14 15:54:51 +05304304/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004305static bool be_reset_required(struct be_adapter *adapter)
4306{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304307 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004308}
4309
Sathya Perlad3791422012-09-28 04:39:44 +00004310static char *mc_name(struct be_adapter *adapter)
4311{
4312 if (adapter->function_mode & FLEX10_MODE)
4313 return "FLEX10";
4314 else if (adapter->function_mode & VNIC_MODE)
4315 return "vNIC";
4316 else if (adapter->function_mode & UMC_ENABLED)
4317 return "UMC";
4318 else
4319 return "";
4320}
4321
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4326
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004327static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004328{
4329 int status = 0;
4330 struct be_adapter *adapter;
4331 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004332 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004333
4334 status = pci_enable_device(pdev);
4335 if (status)
4336 goto do_none;
4337
4338 status = pci_request_regions(pdev, DRV_NAME);
4339 if (status)
4340 goto disable_dev;
4341 pci_set_master(pdev);
4342
Sathya Perla7f640062012-06-05 19:37:20 +00004343 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004344 if (netdev == NULL) {
4345 status = -ENOMEM;
4346 goto rel_reg;
4347 }
4348 adapter = netdev_priv(netdev);
4349 adapter->pdev = pdev;
4350 pci_set_drvdata(pdev, adapter);
4351 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004352 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004353
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004354 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004355 if (!status) {
Craig Hada2bd92cd2013-04-21 23:28:18 +00004356 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4357 if (status < 0) {
4358 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4359 goto free_netdev;
4360 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004361 netdev->features |= NETIF_F_HIGHDMA;
4362 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004363 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Somnath Kotur0c5fed02013-06-11 17:18:22 +05304364 if (!status)
4365 status = dma_set_coherent_mask(&pdev->dev,
4366 DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004367 if (status) {
4368 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4369 goto free_netdev;
4370 }
4371 }
4372
Sathya Perlad6b6d982012-09-05 01:56:48 +00004373 status = pci_enable_pcie_error_reporting(pdev);
4374 if (status)
Ivan Vecera4ce1fd62013-07-25 16:10:55 +02004375 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00004376
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004377 status = be_ctrl_init(adapter);
4378 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004379 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004380
Sathya Perla2243e2e2009-11-22 22:02:03 +00004381 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004382 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004383 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004384 if (status)
4385 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004386 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004387
Sathya Perla39f1d942012-05-08 19:41:24 +00004388 if (be_reset_required(adapter)) {
4389 status = be_cmd_reset_function(adapter);
4390 if (status)
4391 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004392
Kalesh AP2d177be2013-04-28 22:22:29 +00004393 /* Wait for interrupts to quiesce after an FLR */
4394 msleep(100);
4395 }
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004396
4397 /* Allow interrupts for other ULPs running on NIC function */
4398 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004399
Kalesh AP2d177be2013-04-28 22:22:29 +00004400 /* tell fw we're ready to fire cmds */
4401 status = be_cmd_fw_init(adapter);
4402 if (status)
4403 goto ctrl_clean;
4404
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004405 status = be_stats_init(adapter);
4406 if (status)
4407 goto ctrl_clean;
4408
Sathya Perla39f1d942012-05-08 19:41:24 +00004409 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004410 if (status)
4411 goto stats_clean;
4412
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004413 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004414 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004415 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004416
Sathya Perla5fb379e2009-06-18 00:02:59 +00004417 status = be_setup(adapter);
4418 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004419 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004420
Sathya Perla3abcded2010-10-03 22:12:27 -07004421 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004422 status = register_netdev(netdev);
4423 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004424 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004425
Parav Pandit045508a2012-03-26 14:27:13 +00004426 be_roce_dev_add(adapter);
4427
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004428 schedule_delayed_work(&adapter->func_recovery_work,
4429 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004430
4431 be_cmd_query_port_name(adapter, &port_name);
4432
Sathya Perlad3791422012-09-28 04:39:44 +00004433 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4434 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004435
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004436 return 0;
4437
Sathya Perla5fb379e2009-06-18 00:02:59 +00004438unsetup:
4439 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004440stats_clean:
4441 be_stats_cleanup(adapter);
4442ctrl_clean:
4443 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004444free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004445 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004446 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004447rel_reg:
4448 pci_release_regions(pdev);
4449disable_dev:
4450 pci_disable_device(pdev);
4451do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004452 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004453 return status;
4454}
4455
/* PM suspend callback: arm WOL if enabled, stop the recovery worker,
 * quiesce the interface, tear down queues and power down the device.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4479
4480static int be_resume(struct pci_dev *pdev)
4481{
4482 int status = 0;
4483 struct be_adapter *adapter = pci_get_drvdata(pdev);
4484 struct net_device *netdev = adapter->netdev;
4485
4486 netif_device_detach(netdev);
4487
4488 status = pci_enable_device(pdev);
4489 if (status)
4490 return status;
4491
Yijing Wang1ca01512013-06-27 20:53:42 +08004492 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004493 pci_restore_state(pdev);
4494
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304495 status = be_fw_wait_ready(adapter);
4496 if (status)
4497 return status;
4498
Sathya Perla2243e2e2009-11-22 22:02:03 +00004499 /* tell fw we're ready to fire cmds */
4500 status = be_cmd_fw_init(adapter);
4501 if (status)
4502 return status;
4503
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004504 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004505 if (netif_running(netdev)) {
4506 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004507 be_open(netdev);
4508 rtnl_unlock();
4509 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004510
4511 schedule_delayed_work(&adapter->func_recovery_work,
4512 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004513 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004514
4515 if (adapter->wol)
4516 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004517
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004518 return 0;
4519}
4520
Sathya Perla82456b02010-02-17 01:35:37 +00004521/*
4522 * An FLR will stop BE from DMAing any data.
4523 */
4524static void be_shutdown(struct pci_dev *pdev)
4525{
4526 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004527
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004528 if (!adapter)
4529 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004530
Sathya Perla0f4a6822011-03-21 20:49:28 +00004531 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004532 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004533
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004534 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004535
Ajit Khaparde57841862011-04-06 18:08:43 +00004536 be_cmd_reset_function(adapter);
4537
Sathya Perla82456b02010-02-17 01:35:37 +00004538 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004539}
4540
Sathya Perlacf588472010-02-14 21:22:01 +00004541static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4542 pci_channel_state_t state)
4543{
4544 struct be_adapter *adapter = pci_get_drvdata(pdev);
4545 struct net_device *netdev = adapter->netdev;
4546
4547 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4548
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004549 if (!adapter->eeh_error) {
4550 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004551
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004552 cancel_delayed_work_sync(&adapter->func_recovery_work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004553
Sathya Perlacf588472010-02-14 21:22:01 +00004554 rtnl_lock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004555 netif_device_detach(netdev);
4556 if (netif_running(netdev))
4557 be_close(netdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004558 rtnl_unlock();
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004559
4560 be_clear(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004561 }
Sathya Perlacf588472010-02-14 21:22:01 +00004562
4563 if (state == pci_channel_io_perm_failure)
4564 return PCI_ERS_RESULT_DISCONNECT;
4565
4566 pci_disable_device(pdev);
4567
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004568 /* The error could cause the FW to trigger a flash debug dump.
4569 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004570 * can cause it not to recover; wait for it to finish.
4571 * Wait only for first function as it is needed only once per
4572 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004573 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004574 if (pdev->devfn == 0)
4575 ssleep(30);
4576
Sathya Perlacf588472010-02-14 21:22:01 +00004577 return PCI_ERS_RESULT_NEED_RESET;
4578}
4579
/* EEH/AER slot_reset callback: re-enable the device after a slot reset,
 * wait for the FW to come back and clear recorded error state.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* forget the error state set in be_eeh_err_detected() */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4606
/* EEH/AER resume callback: rebuild the adapter after a successful slot
 * reset and re-attach the netdev. On any failure the device is left
 * detached and only an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	/* re-open only if the interface was up before the error */
	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the recovery poller cancelled in be_eeh_err_detected() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4643
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004644static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004645 .error_detected = be_eeh_err_detected,
4646 .slot_reset = be_eeh_reset,
4647 .resume = be_eeh_resume,
4648};
4649
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004650static struct pci_driver be_driver = {
4651 .name = DRV_NAME,
4652 .id_table = be_dev_ids,
4653 .probe = be_probe,
4654 .remove = be_remove,
4655 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004656 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004657 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004658 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004659};
4660
4661static int __init be_init_module(void)
4662{
Joe Perches8e95a202009-12-03 07:58:21 +00004663 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4664 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004665 printk(KERN_WARNING DRV_NAME
4666 " : Module param rx_frag_size must be 2048/4096/8192."
4667 " Using 2048\n");
4668 rx_frag_size = 2048;
4669 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004670
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004671 return pci_register_driver(&be_driver);
4672}
4673module_init(be_init_module);
4674
/* Module exit point: unregister the PCI driver; per-device teardown
 * happens via be_remove() for each bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);