blob: a254942ed5a609a7148b860ae995ad2618ed7ed8 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI device IDs this driver binds to; the zeroed entry terminates the
 * table for the PCI core.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the Unrecoverable Error status-low
 * register, indexed by bit position (bit 0 first).  Some names carry
 * trailing spaces as emitted by the original firmware documentation —
 * do not trim them.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names for each bit of the Unrecoverable Error status-high register,
 * indexed by bit position; trailing "Unknown" entries pad undocumented bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 memset(mem->va, 0, mem->size);
153 return 0;
154}
155
/* Enable/disable host interrupt delivery by toggling the HOSTINTR bit in
 * the MEMBAR interrupt-control register, accessed through PCI config space.
 * The register is read first so it is only written when the state actually
 * needs to change.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* Don't touch the device once an EEH (PCI) error was detected */
	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
177
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179{
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000183
184 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
/* Ring the event-queue doorbell: acknowledge 'num_popped' consumed events
 * on EQ 'qid', optionally re-arming the EQ ('arm') and clearing the
 * interrupt ('clear_int').
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	/* ring id is split across two fields in the doorbell register */
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device once an EEH (PCI) error was detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
217
Sathya Perla8788fdc2009-07-27 22:52:03 +0000218void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219{
220 u32 val = 0;
221 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000222 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000224
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000225 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000226 return;
227
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228 if (arm)
229 val |= 1 << DB_CQ_REARM_SHIFT;
230 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232}
233
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234static int be_mac_addr_set(struct net_device *netdev, void *p)
235{
236 struct be_adapter *adapter = netdev_priv(netdev);
237 struct sockaddr *addr = p;
238 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000239 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000240 u32 pmac_id = adapter->pmac_id[0];
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000241 bool active_mac = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000243 if (!is_valid_ether_addr(addr->sa_data))
244 return -EADDRNOTAVAIL;
245
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000246 /* For BE VF, MAC address is already activated by PF.
247 * Hence only operation left is updating netdev->devaddr.
248 * Update it if user is passing the same MAC which was used
249 * during configuring VF MAC from PF(Hypervisor).
250 */
251 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252 status = be_cmd_mac_addr_query(adapter, current_mac,
253 false, adapter->if_handle, 0);
254 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255 goto done;
256 else
257 goto err;
258 }
259
260 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261 goto done;
262
263 /* For Lancer check if any MAC is active.
264 * If active, get its mac id.
265 */
266 if (lancer_chip(adapter) && !be_physfn(adapter))
267 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268 &pmac_id, 0);
269
270 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271 adapter->if_handle,
272 &adapter->pmac_id[0], 0);
273
Sathya Perlaa65027e2009-08-17 00:58:04 +0000274 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000275 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700276
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000277 if (active_mac)
278 be_cmd_pmac_del(adapter, adapter->if_handle,
279 pmac_id, 0);
280done:
Somnath Koture3a7ae22011-10-27 07:14:05 +0000281 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
282 return 0;
283err:
284 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700285 return status;
286}
287
Sathya Perlaca34fe32012-11-06 17:48:56 +0000288/* BE2 supports only v0 cmd */
289static void *hw_stats_from_cmd(struct be_adapter *adapter)
290{
291 if (BE2_chip(adapter)) {
292 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294 return &cmd->hw_stats;
295 } else {
296 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 }
300}
301
302/* BE2 supports only v0 cmd */
303static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308 return &hw_stats->erx;
309 } else {
310 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 }
314}
315
/* Copy the v0-format (BE2) hardware stats from the command response into
 * the driver's chip-agnostic drv_stats, after byte-swapping the response
 * from little-endian to CPU order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatches separately; fold both into
	 * the single drv_stats counter
	 */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port in the rxf section */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
364
Sathya Perlaca34fe32012-11-06 17:48:56 +0000365static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000366{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000367 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
368 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
369 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000371 &rxf_stats->port[adapter->port_num];
372 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000373
Sathya Perlaac124ff2011-07-25 19:10:14 +0000374 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000375 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
376 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000377 drvs->rx_pause_frames = port_stats->rx_pause_frames;
378 drvs->rx_crc_errors = port_stats->rx_crc_errors;
379 drvs->rx_control_frames = port_stats->rx_control_frames;
380 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
381 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
382 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
383 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
384 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
385 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
386 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
387 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
388 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
389 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
390 drvs->rx_dropped_header_too_small =
391 port_stats->rx_dropped_header_too_small;
392 drvs->rx_input_fifo_overflow_drop =
393 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000394 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395 drvs->rx_alignment_symbol_errors =
396 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000397 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000398 drvs->tx_pauseframes = port_stats->tx_pauseframes;
399 drvs->tx_controlframes = port_stats->tx_controlframes;
400 drvs->jabber_events = port_stats->jabber_events;
401 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000403 drvs->forwarded_packets = rxf_stats->forwarded_packets;
404 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000405 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
406 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000407 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
408}
409
/* Copy Lancer per-port (pport) hardware stats from the command response
 * into the driver's chip-agnostic drv_stats, after byte-swapping the
 * response from little-endian to CPU order.  Fields suffixed _lo are the
 * low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer reports a single fifo-overflow counter; it feeds both
	 * drv_stats overflow fields
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000448
Sathya Perla09c1c682011-08-22 19:41:53 +0000449static void accumulate_16bit_val(u32 *acc, u16 val)
450{
451#define lo(x) (x & 0xFFFF)
452#define hi(x) (x & 0xFFFF0000)
453 bool wrapped = val < lo(*acc);
454 u32 newacc = hi(*acc) + val;
455
456 if (wrapped)
457 newacc += 65536;
458 ACCESS_ONCE(*acc) = newacc;
459}
460
/* Parse the raw stats-command response into adapter->drv_stats (and the
 * per-RX-queue drop counters), dispatching on chip generation.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					(u16)erx->rx_drops_no_fragments \
					[rxo->q.id]);
		}
	}
}
487
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * driver error counters into @stats.  Per-queue 64-bit counters are read
 * under the u64_stats fetch/retry protocol so a consistent snapshot is
 * obtained even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until the writer did not update mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until the writer did not update mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
553
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000554void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700555{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700556 struct net_device *netdev = adapter->netdev;
557
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000558 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000559 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000560 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000562
563 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564 netif_carrier_on(netdev);
565 else
566 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567}
568
Sathya Perla3c8def92011-06-12 20:01:58 +0000569static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000570 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571{
Sathya Perla3c8def92011-06-12 20:01:58 +0000572 struct be_tx_stats *stats = tx_stats(txo);
573
Sathya Perlaab1594e2011-07-25 19:10:15 +0000574 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000575 stats->tx_reqs++;
576 stats->tx_wrbs += wrb_cnt;
577 stats->tx_bytes += copied;
578 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700579 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000580 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000581 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700582}
583
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	/* one WRB for the linear part, counted only if any bytes live there
	 * (len > data_len means the head holds data)
	 */
	int cnt = (skb->len > skb->data_len);

	/* one WRB per paged fragment */
	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	/* BE chips (not Lancer) require an even WRB count per request;
	 * *dummy tells the caller whether a padding WRB was added
	 */
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
604
605static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606{
607 wrb->frag_pa_hi = upper_32_bits(addr);
608 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000610 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700611}
612
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000613static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614 struct sk_buff *skb)
615{
616 u8 vlan_prio;
617 u16 vlan_tag;
618
619 vlan_tag = vlan_tx_tag_get(skb);
620 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621 /* If vlan priority provided by OS is NOT in available bmap */
622 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624 adapter->recommended_prio;
625
626 return vlan_tag;
627}
628
/* Build the header WRB for a TX request: offload flags (LSO, checksum,
 * VLAN insertion), total WRB count and payload length, encoded via the
 * AMAP bit-field macros.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag is not set on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request HW checksum insertion for TCP or UDP payloads */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
662
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000663static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000664 bool unmap_single)
665{
666 dma_addr_t dma;
667
668 be_dws_le_to_cpu(wrb, sizeof(*wrb));
669
670 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000671 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000672 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000673 dma_unmap_single(dev, dma, wrb->frag_len,
674 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000675 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000676 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000677 }
678}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679
/* Map the skb (linear head plus page frags) into TX WRBs on @txq and
 * fill in the leading header WRB. Returns the number of data bytes
 * mapped, or 0 on a DMA mapping failure (all partial mappings are
 * undone and the queue head is restored).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* First slot is reserved for the header WRB; filled in at the end */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; unwind point on error */

	/* Map the linear (non-paged) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length pad WRB, used when the caller needs an extra slot */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything mapped so far; only the first WRB (if any) was
	 * a single mapping, the rest are page mappings.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
745
Somnath Kotur93040ae2012-06-26 22:32:10 +0000746static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
747 struct sk_buff *skb)
748{
749 u16 vlan_tag = 0;
750
751 skb = skb_share_check(skb, GFP_ATOMIC);
752 if (unlikely(!skb))
753 return skb;
754
755 if (vlan_tx_tag_present(skb)) {
756 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Ivan Veceraf11a8692013-04-12 16:49:24 +0200757 skb = __vlan_put_tag(skb, vlan_tag);
758 if (skb)
759 skb->vlan_tci = 0;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000760 }
761
762 return skb;
763}
764
/* ndo_start_xmit handler: apply HW-bug workarounds, map the skb into
 * WRBs on the selected TX queue and ring the doorbell.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * Work around it by trimming the skb to the IP-claimed length.
	 */
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed; make_tx_wrbs() already unmapped, so
		 * just restore the queue head and drop the skb.
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
829
830static int be_change_mtu(struct net_device *netdev, int new_mtu)
831{
832 struct be_adapter *adapter = netdev_priv(netdev);
833 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000834 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
835 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700836 dev_info(&adapter->pdev->dev,
837 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000838 BE_MIN_MTU,
839 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 return -EINVAL;
841 }
842 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
843 netdev->mtu, new_mtu);
844 netdev->mtu = new_mtu;
845 return 0;
846}
847
848/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000849 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
850 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851 */
Sathya Perla10329df2012-06-05 19:37:18 +0000852static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853{
Sathya Perla10329df2012-06-05 19:37:18 +0000854 u16 vids[BE_NUM_VLANS_SUPPORTED];
855 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000856 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000857
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000858 /* No need to further configure vids if in promiscuous mode */
859 if (adapter->promiscuous)
860 return 0;
861
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000862 if (adapter->vlans_added > adapter->max_vlans)
863 goto set_vlan_promisc;
864
865 /* Construct VLAN Table to give to HW */
866 for (i = 0; i < VLAN_N_VID; i++)
867 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000868 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000869
870 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000871 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000872
873 /* Set to VLAN promisc mode as setting VLAN filter failed */
874 if (status) {
875 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
876 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
877 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000879
Sathya Perlab31c50a2009-09-17 10:30:13 -0700880 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000881
882set_vlan_promisc:
883 status = be_cmd_vlan_config(adapter, adapter->if_handle,
884 NULL, 0, 1, 1);
885 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886}
887
Jiri Pirko8e586132011-12-08 19:52:37 -0500888static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700889{
890 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000891 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700892
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000893 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000894 status = -EINVAL;
895 goto ret;
896 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000897
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000898 /* Packets with VID 0 are always received by Lancer by default */
899 if (lancer_chip(adapter) && vid == 0)
900 goto ret;
901
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700902 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000903 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000904 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500905
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000906 if (!status)
907 adapter->vlans_added++;
908 else
909 adapter->vlan_tag[vid] = 0;
910ret:
911 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700912}
913
Jiri Pirko8e586132011-12-08 19:52:37 -0500914static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700915{
916 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000917 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700918
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000919 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000920 status = -EINVAL;
921 goto ret;
922 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000923
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +0000924 /* Packets with VID 0 are always received by Lancer by default */
925 if (lancer_chip(adapter) && vid == 0)
926 goto ret;
927
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700928 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000929 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000930 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500931
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000932 if (!status)
933 adapter->vlans_added--;
934 else
935 adapter->vlan_tag[vid] = 1;
936ret:
937 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700938}
939
/* ndo_set_rx_mode handler: program promiscuous, multicast and unicast
 * filters in the HW to match the netdev's current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC list with the netdev's UC list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously-programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many UC MACs for the HW table; fall back to promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1001
/* ndo_set_vf_mac handler: replace the MAC programmed for VF @vf.
 * Lancer uses the FW MAC-list interface; BE asics delete the old pmac
 * entry and add a new one.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove the currently active MAC (if any) before setting
		 * the new one via the MAC list.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the del status is overwritten by the add
		 * below; a failed del is silently ignored — confirm intended.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* Cache the new MAC only on success */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1041
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001042static int be_get_vf_config(struct net_device *netdev, int vf,
1043 struct ifla_vf_info *vi)
1044{
1045 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001046 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001047
Sathya Perla11ac75e2011-12-13 00:58:50 +00001048 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001049 return -EPERM;
1050
Sathya Perla11ac75e2011-12-13 00:58:50 +00001051 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001052 return -EINVAL;
1053
1054 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001055 vi->tx_rate = vf_cfg->tx_rate;
1056 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001057 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001058 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001059
1060 return 0;
1061}
1062
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001063static int be_set_vf_vlan(struct net_device *netdev,
1064 int vf, u16 vlan, u8 qos)
1065{
1066 struct be_adapter *adapter = netdev_priv(netdev);
1067 int status = 0;
1068
Sathya Perla11ac75e2011-12-13 00:58:50 +00001069 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001070 return -EPERM;
1071
Sathya Perla11ac75e2011-12-13 00:58:50 +00001072 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001073 return -EINVAL;
1074
1075 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001076 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1077 /* If this is new value, program it. Else skip. */
1078 adapter->vf_cfg[vf].vlan_tag = vlan;
1079
1080 status = be_cmd_set_hsw_config(adapter, vlan,
1081 vf + 1, adapter->vf_cfg[vf].if_handle);
1082 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001083 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001084 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001085 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001086 vlan = adapter->vf_cfg[vf].def_vid;
1087 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1088 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001089 }
1090
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001091
1092 if (status)
1093 dev_info(&adapter->pdev->dev,
1094 "VLAN %d config on VF %d failed\n", vlan, vf);
1095 return status;
1096}
1097
Ajit Khapardee1d18732010-07-23 01:52:13 +00001098static int be_set_vf_tx_rate(struct net_device *netdev,
1099 int vf, int rate)
1100{
1101 struct be_adapter *adapter = netdev_priv(netdev);
1102 int status = 0;
1103
Sathya Perla11ac75e2011-12-13 00:58:50 +00001104 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001105 return -EPERM;
1106
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001107 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001108 return -EINVAL;
1109
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001110 if (rate < 100 || rate > 10000) {
1111 dev_err(&adapter->pdev->dev,
1112 "tx rate must be between 100 and 10000 Mbps\n");
1113 return -EINVAL;
1114 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001115
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001116 if (lancer_chip(adapter))
1117 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1118 else
1119 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001120
1121 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001122 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001123 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001124 else
1125 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001126 return status;
1127}
1128
Sathya Perla39f1d942012-05-08 19:41:24 +00001129static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1130{
1131 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001132 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001133 u16 offset, stride;
1134
1135 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001136 if (!pos)
1137 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001138 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1139 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1140
1141 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1142 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001143 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001144 vfs++;
1145 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1146 assigned_vfs++;
1147 }
1148 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1149 }
1150 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1151}
1152
/* Adaptive interrupt coalescing: recompute the EQ delay for @eqo from
 * the RX packet rate sampled over the last second, and push the new
 * value to FW only when it changed.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just (re)program the statically configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to sample */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the packet counter consistently vs. the datapath writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec into an EQ delay, clamped to this EQ's range */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Skip the FW command when the delay is unchanged */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1201
Sathya Perla3abcded2010-10-03 22:12:27 -07001202static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001203 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001204{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001205 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001206
Sathya Perlaab1594e2011-07-25 19:10:15 +00001207 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001208 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001209 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001210 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001211 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001212 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001213 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001214 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001215 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001216}
1217
Sathya Perla2e588f82011-03-11 02:49:26 +00001218static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001219{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001220 /* L4 checksum is not reliable for non TCP/UDP packets.
1221 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001222 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1223 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001224}
1225
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001226static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1227 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001228{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001229 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001230 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001231 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001232
Sathya Perla3abcded2010-10-03 22:12:27 -07001233 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001234 BUG_ON(!rx_page_info->page);
1235
Ajit Khaparde205859a2010-02-09 01:34:21 +00001236 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001237 dma_unmap_page(&adapter->pdev->dev,
1238 dma_unmap_addr(rx_page_info, bus),
1239 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001240 rx_page_info->last_page_user = false;
1241 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001242
1243 atomic_dec(&rxq->used);
1244 return rx_page_info;
1245}
1246
1247/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001248static void be_rx_compl_discard(struct be_rx_obj *rxo,
1249 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001250{
Sathya Perla3abcded2010-10-03 22:12:27 -07001251 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001252 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001253 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001254
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001255 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001256 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001257 put_page(page_info->page);
1258 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001259 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001260 }
1261}
1262
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	/* First posted frag of this completion */
	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area; the
		 * payload stays in the rx page and is attached as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page reference now owned by the skb (or released above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken when the frags were posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1339
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and recycle the rx
		 * buffers that were posted for this completion.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only if the netdev has RXCSUM on */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1373
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb: recycle the posted rx buffers and drop the frame */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* NOTE: j is u16, so j = -1 wraps to 0xffff and the first j++
	 * brings it to 0 — frag slot indexing starts at 0 as intended.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for frames whose checksum passed in HW */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1429
/* Decode a v1 (BE3-native) RX completion entry into the sw rxcp struct */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461
/* Decode a v0 (legacy) RX completion entry into the sw rxcp struct */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1493
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * The parsed result lives in rxo->rxcp and is only valid until the
 * next call for this rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure the rest of the entry is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the vlan flag for the port-vid if the vid is not
		 * configured on the interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1533
Eric Dumazet1829b082011-03-01 05:48:12 +00001534static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001535{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001537
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001539 gfp |= __GFP_COMP;
1540 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541}
1542
1543/*
1544 * Allocate a page, split it to fragments of size rx_frag_size and post as
1545 * receive buffers to BE
1546 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001547static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001548{
Sathya Perla3abcded2010-10-03 22:12:27 -07001549 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001550 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001551 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552 struct page *pagep = NULL;
1553 struct be_eth_rx_d *rxd;
1554 u64 page_dmaaddr = 0, frag_dmaaddr;
1555 u32 posted, page_offset = 0;
1556
Sathya Perla3abcded2010-10-03 22:12:27 -07001557 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1559 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001560 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001562 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563 break;
1564 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001565 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1566 0, adapter->big_page_size,
1567 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568 page_info->page_offset = 0;
1569 } else {
1570 get_page(pagep);
1571 page_info->page_offset = page_offset + rx_frag_size;
1572 }
1573 page_offset = page_info->page_offset;
1574 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001575 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001576 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1577
1578 rxd = queue_head_node(rxq);
1579 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1580 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581
1582 /* Any space left in the current big page for another frag? */
1583 if ((page_offset + rx_frag_size + rx_frag_size) >
1584 adapter->big_page_size) {
1585 pagep = NULL;
1586 page_info->last_page_user = true;
1587 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001588
1589 prev_page_info = page_info;
1590 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001591 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592 }
1593 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001594 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595
1596 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001598 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001599 } else if (atomic_read(&rxq->used) == 0) {
1600 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001601 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001602 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001603}
1604
/* Fetch the next valid TX completion from tx_cq, or NULL if none.
 * The entry is byte-swapped in place and its valid bit cleared so it
 * reads as empty on the next pass.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1620
/* Reclaim one transmitted skb: unmap its WRBs from txq->tail through
 * last_index and free the skb. Returns the number of WRBs consumed
 * (including the header WRB); the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data WRB maps the skb linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1652
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001653/* Return the number of events in the event queue */
1654static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001655{
1656 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001657 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001658
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001659 do {
1660 eqe = queue_tail_node(&eqo->q);
1661 if (eqe->evt == 0)
1662 break;
1663
1664 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001665 eqe->evt = 0;
1666 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001667 queue_tail_inc(&eqo->q);
1668 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001669
1670 return num;
1671}
1672
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001673/* Leaves the EQ is disarmed state */
1674static void be_eq_clean(struct be_eq_obj *eqo)
1675{
1676 int num = events_get(eqo);
1677
1678 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1679}
1680
/* Drain the RX CQ after the rxq has been told to stop, then release
 * every rx buffer that was posted but never consumed, leaving both
 * queues empty and the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW has died */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1731
/* Drain all TX CQs, waiting up to ~200ms for outstanding completions,
 * then forcibly reclaim any posted skbs whose completions never came.
 * Called on the teardown path with TX already stopped.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* reset per-queue accumulators for the next
				 * txq in this pass */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute the WRB span of this skb since no
			 * completion reported it */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1790
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001791static void be_evt_queues_destroy(struct be_adapter *adapter)
1792{
1793 struct be_eq_obj *eqo;
1794 int i;
1795
1796 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001797 if (eqo->q.created) {
1798 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001799 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001800 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001801 be_queue_free(adapter, &eqo->q);
1802 }
1803}
1804
/* Allocate and create one event queue per IRQ vector.
 * Returns 0 on success or the first failing status; on failure the
 * caller is responsible for tearing down any queues already created.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		/* adaptive interrupt coalescing on by default */
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1832
Sathya Perla5fb379e2009-06-18 00:02:59 +00001833static void be_mcc_queues_destroy(struct be_adapter *adapter)
1834{
1835 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001836
Sathya Perla8788fdc2009-07-27 22:52:03 +00001837 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001838 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001839 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001840 be_queue_free(adapter, q);
1841
Sathya Perla8788fdc2009-07-27 22:52:03 +00001842 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001843 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001844 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001845 be_queue_free(adapter, q);
1846}
1847
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* CQ first: the MCC WRB queue must be bound to an existing CQ */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1880
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001881static void be_tx_queues_destroy(struct be_adapter *adapter)
1882{
1883 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001884 struct be_tx_obj *txo;
1885 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001886
Sathya Perla3c8def92011-06-12 20:01:58 +00001887 for_all_tx_queues(adapter, txo, i) {
1888 q = &txo->q;
1889 if (q->created)
1890 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1891 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001892
Sathya Perla3c8def92011-06-12 20:01:58 +00001893 q = &txo->cq;
1894 if (q->created)
1895 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1896 be_queue_free(adapter, q);
1897 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898}
1899
Sathya Perladafc0fe2011-10-24 02:45:02 +00001900static int be_num_txqs_want(struct be_adapter *adapter)
1901{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001902 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1903 be_is_mc(adapter) ||
1904 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00001905 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00001906 return 1;
1907 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001908 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001909}
1910
/* Decide the TX queue count, publish it to the stack, and create one
 * completion queue per TX queue. Returns 0 or the first failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1943
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001944static int be_tx_qs_create(struct be_adapter *adapter)
1945{
1946 struct be_tx_obj *txo;
1947 int i, status;
1948
1949 for_all_tx_queues(adapter, txo, i) {
1950 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1951 sizeof(struct be_eth_wrb));
1952 if (status)
1953 return status;
1954
1955 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1956 if (status)
1957 return status;
1958 }
1959
Sathya Perlad3791422012-09-28 04:39:44 +00001960 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1961 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001962 return 0;
1963}
1964
1965static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966{
1967 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001968 struct be_rx_obj *rxo;
1969 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970
Sathya Perla3abcded2010-10-03 22:12:27 -07001971 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001972 q = &rxo->cq;
1973 if (q->created)
1974 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1975 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001976 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977}
1978
/* Derives the RX queue count from the number of irqs, publishes it to
 * the stack, and allocates plus creates one CQ per RX ring.
 * Returns 0 or the first failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must run under RTNL */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
			adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs share EQs when there are more CQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
2017
/* INTx interrupt handler: schedules NAPI on EQ0 and notifies the HW of
 * the events consumed. Tracks spurious interrupts so the kernel does
 * not disable a shared line that occasionally fires with no events.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2049
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002050static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002051{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002052 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002053
Sathya Perla0b545a62012-11-23 00:27:18 +00002054 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2055 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002056 return IRQ_HANDLED;
2057}
2058
Sathya Perla2e588f82011-03-11 02:49:26 +00002059static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002060{
Sathya Perla2e588f82011-03-11 02:49:26 +00002061 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002062}
2063
/* NAPI RX handler for one RX ring: drains up to @budget completions
 * from the ring's CQ, dispatching each to the GRO or non-GRO path.
 * Returns the number of completions processed; refills the RX ring
 * when it drops below the watermark.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* replenish RX buffers once usage falls below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2113
/* Reaps up to @budget TX completions from @txo's CQ, freeing the
 * consumed wrbs and waking the corresponding netdev subqueue if it had
 * been stopped for lack of wrbs. Returns true when the CQ was fully
 * drained (fewer than @budget completions found).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002146
/* NAPI poll handler for one event queue: services every TX and RX ring
 * mapped to this EQ (rings are striped across EQs by index), plus MCC
 * completions on the MCC EQ. Re-arms the EQ only when all work fit
 * within @budget; otherwise events are counted/cleared but the EQ is
 * left unarmed so polling continues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2185
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002186void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002187{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002188 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2189 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002190 u32 i;
2191
Sathya Perlad23e9462012-12-17 19:38:51 +00002192 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002193 return;
2194
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002195 if (lancer_chip(adapter)) {
2196 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2197 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2198 sliport_err1 = ioread32(adapter->db +
2199 SLIPORT_ERROR1_OFFSET);
2200 sliport_err2 = ioread32(adapter->db +
2201 SLIPORT_ERROR2_OFFSET);
2202 }
2203 } else {
2204 pci_read_config_dword(adapter->pdev,
2205 PCICFG_UE_STATUS_LOW, &ue_lo);
2206 pci_read_config_dword(adapter->pdev,
2207 PCICFG_UE_STATUS_HIGH, &ue_hi);
2208 pci_read_config_dword(adapter->pdev,
2209 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2210 pci_read_config_dword(adapter->pdev,
2211 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002212
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002213 ue_lo = (ue_lo & ~ue_lo_mask);
2214 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002215 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002216
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002217 /* On certain platforms BE hardware can indicate spurious UEs.
2218 * Allow the h/w to stop working completely in case of a real UE.
2219 * Hence not setting the hw_error for UE detection.
2220 */
2221 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002222 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002223 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002224 "Error detected in the card\n");
2225 }
2226
2227 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2228 dev_err(&adapter->pdev->dev,
2229 "ERR: sliport status 0x%x\n", sliport_status);
2230 dev_err(&adapter->pdev->dev,
2231 "ERR: sliport error1 0x%x\n", sliport_err1);
2232 dev_err(&adapter->pdev->dev,
2233 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002234 }
2235
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002236 if (ue_lo) {
2237 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2238 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002239 dev_err(&adapter->pdev->dev,
2240 "UE: %s bit set\n", ue_status_low_desc[i]);
2241 }
2242 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002243
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002244 if (ue_hi) {
2245 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2246 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002247 dev_err(&adapter->pdev->dev,
2248 "UE: %s bit set\n", ue_status_hi_desc[i]);
2249 }
2250 }
2251
2252}
2253
Sathya Perla8d56ff12009-11-22 22:02:26 +00002254static void be_msix_disable(struct be_adapter *adapter)
2255{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002256 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002257 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002258 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002259 }
2260}
2261
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002262static uint be_num_rss_want(struct be_adapter *adapter)
2263{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002264 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002265
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002266 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002267 (lancer_chip(adapter) ||
2268 (!sriov_want(adapter) && be_physfn(adapter)))) {
2269 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002270 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2271 }
2272 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002273}
2274
/* Tries to enable enough MSI-x vectors for the desired RSS rings (plus
 * RoCE vectors when supported). If the PCI core offers fewer vectors,
 * retries with the offered count; on total failure leaves MSI-x
 * disabled so the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors actually
		 * available; retry with that smaller count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* split the granted vectors between NIC and RoCE, NIC first */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2322
/* Returns the MSI-x vector number assigned to the given event queue */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2328
/* Requests one MSI-x IRQ per event queue, naming each "<netdev>-q<N>".
 * On failure, frees every IRQ already requested and disables MSI-x so
 * the caller can fall back to INTx. Returns 0 or the request_irq error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free only the IRQs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2352
/* Registers interrupt handlers: MSI-x if enabled (falling back to INTx
 * on a PF when MSI-x registration fails), otherwise shared INTx on the
 * first EQ. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2380
2381static void be_irq_unregister(struct be_adapter *adapter)
2382{
2383 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002384 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002385 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002386
2387 if (!adapter->isr_registered)
2388 return;
2389
2390 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002391 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002392 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002393 goto done;
2394 }
2395
2396 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002397 for_all_evt_queues(adapter, eqo, i)
2398 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002399
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002400done:
2401 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002402}
2403
/* Destroys every RX ring in HW, drains its CQ of flush completions and
 * frees the ring memory. The CQs themselves are torn down separately
 * by be_rx_cqs_destroy().
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2424
/* ndo_stop handler. Teardown order matters: stop RoCE, mask interrupts,
 * stop NAPI and async MCC, drain TX, destroy RX rings, then quiesce
 * each EQ (after its irq has finished) before unregistering IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* Lancer has no global intr-enable bit to clear here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure the irq handler is not still running on the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2460
/* Allocates the RX rings, creates them in HW (default ring first, as FW
 * requires, then the RSS rings), programs the 128-entry RSS indirection
 * table when multiple rings exist, and posts the initial RX buffers.
 * Returns 0 or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table by cycling through the RSS
		 * rings' ids; the default ring is excluded from RSS
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2507
/* ndo_open handler: creates the RX rings, registers IRQs, enables HW
 * interrupts, arms all TX/RX CQs, starts async MCC processing, enables
 * NAPI and arms the EQs, then reports the current link state and opens
 * the RoCE side. On failure, be_close() undoes whatever was set up.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer has no global intr-enable bit to set here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2549
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002550static int be_setup_wol(struct be_adapter *adapter, bool enable)
2551{
2552 struct be_dma_mem cmd;
2553 int status = 0;
2554 u8 mac[ETH_ALEN];
2555
2556 memset(mac, 0, ETH_ALEN);
2557
2558 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002559 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2560 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002561 if (cmd.va == NULL)
2562 return -1;
2563 memset(cmd.va, 0, cmd.size);
2564
2565 if (enable) {
2566 status = pci_write_config_dword(adapter->pdev,
2567 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2568 if (status) {
2569 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002570 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002571 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2572 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002573 return status;
2574 }
2575 status = be_cmd_enable_magic_wol(adapter,
2576 adapter->netdev->dev_addr, &cmd);
2577 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2578 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2579 } else {
2580 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2581 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2582 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2583 }
2584
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002585 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002586 return status;
2587}
2588
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002589/*
2590 * Generate a seed MAC address from the PF MAC Address using jhash.
2591 * MAC Address for VFs are assigned incrementally starting from the seed.
2592 * These addresses are programmed in the ASIC by the PF and the VF driver
2593 * queries for the MAC address during its probe.
2594 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs VF MACs via the mac-list command;
		 * BE chips add a pmac on the VF's interface instead
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		/* NOTE: the loop continues past a per-VF failure; only
		 * the status of the last VF is returned to the caller
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* each subsequent VF gets seed MAC + 1 in the last octet */
		mac[5] += 1;
	}
	return status;
}
2623
/* For a pre-existing (already enabled) set of VFs, read back the MAC that
 * is currently programmed for each VF and cache it in its vf_cfg.
 * Returns 0 on success or the first failing query status.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Refreshes vf_cfg->pmac_id as a side effect.
		 * NOTE(review): the return value of this call is ignored --
		 * confirm that proceeding with a possibly stale pmac_id is
		 * intended.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2643
/* Tear down all VF state: per-VF MAC entries and interfaces, then disable
 * SR-IOV. If any VF is still assigned to a VM, the hardware side is left
 * untouched and only the driver-side bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		/* Pulling SR-IOV out from under an assigned VM would break
		 * the guest; skip the hardware teardown entirely.
		 */
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo what be_vf_eth_addr_config() programmed */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2669
Sathya Perlaa54769f2011-10-24 02:45:00 +00002670static int be_clear(struct be_adapter *adapter)
2671{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002672 int i = 1;
2673
Sathya Perla191eb752012-02-23 18:50:13 +00002674 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2675 cancel_delayed_work_sync(&adapter->work);
2676 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2677 }
2678
Sathya Perla11ac75e2011-12-13 00:58:50 +00002679 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002680 be_vf_clear(adapter);
2681
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002682 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2683 be_cmd_pmac_del(adapter, adapter->if_handle,
2684 adapter->pmac_id[i], 0);
2685
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002686 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002687
2688 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002689 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002690 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002691 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002692
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002693 kfree(adapter->pmac_id);
2694 adapter->pmac_id = NULL;
2695
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002696 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002697 return 0;
2698}
2699
Sathya Perla4c876612013-02-03 20:30:11 +00002700static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002701{
Sathya Perla4c876612013-02-03 20:30:11 +00002702 struct be_vf_cfg *vf_cfg;
2703 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002704 int status;
2705
Sathya Perla4c876612013-02-03 20:30:11 +00002706 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2707 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002708
Sathya Perla4c876612013-02-03 20:30:11 +00002709 for_all_vfs(adapter, vf_cfg, vf) {
2710 if (!BE3_chip(adapter))
2711 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2712
2713 /* If a FW profile exists, then cap_flags are updated */
2714 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2715 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2716 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2717 &vf_cfg->if_handle, vf + 1);
2718 if (status)
2719 goto err;
2720 }
2721err:
2722 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002723}
2724
Sathya Perla39f1d942012-05-08 19:41:24 +00002725static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002726{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002727 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002728 int vf;
2729
Sathya Perla39f1d942012-05-08 19:41:24 +00002730 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2731 GFP_KERNEL);
2732 if (!adapter->vf_cfg)
2733 return -ENOMEM;
2734
Sathya Perla11ac75e2011-12-13 00:58:50 +00002735 for_all_vfs(adapter, vf_cfg, vf) {
2736 vf_cfg->if_handle = -1;
2737 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002738 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002739 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002740}
2741
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002742static int be_vf_setup(struct be_adapter *adapter)
2743{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002744 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002745 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002746 int status, old_vfs, vf;
2747 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002748
Sathya Perla4c876612013-02-03 20:30:11 +00002749 old_vfs = be_find_vfs(adapter, ENABLED);
2750 if (old_vfs) {
2751 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2752 if (old_vfs != num_vfs)
2753 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2754 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002755 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002756 if (num_vfs > adapter->dev_num_vfs)
2757 dev_info(dev, "Device supports %d VFs and not %d\n",
2758 adapter->dev_num_vfs, num_vfs);
2759 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2760
2761 status = pci_enable_sriov(adapter->pdev, num_vfs);
2762 if (status) {
2763 dev_err(dev, "SRIOV enable failed\n");
2764 adapter->num_vfs = 0;
2765 return 0;
2766 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002767 }
2768
2769 status = be_vf_setup_init(adapter);
2770 if (status)
2771 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002772
Sathya Perla4c876612013-02-03 20:30:11 +00002773 if (old_vfs) {
2774 for_all_vfs(adapter, vf_cfg, vf) {
2775 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2776 if (status)
2777 goto err;
2778 }
2779 } else {
2780 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002781 if (status)
2782 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002783 }
2784
Sathya Perla4c876612013-02-03 20:30:11 +00002785 if (old_vfs) {
2786 status = be_vfs_mac_query(adapter);
2787 if (status)
2788 goto err;
2789 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002790 status = be_vf_eth_addr_config(adapter);
2791 if (status)
2792 goto err;
2793 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002794
Sathya Perla11ac75e2011-12-13 00:58:50 +00002795 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla4c876612013-02-03 20:30:11 +00002796 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2797 * Allow full available bandwidth
2798 */
2799 if (BE3_chip(adapter) && !old_vfs)
2800 be_cmd_set_qos(adapter, 1000, vf+1);
2801
2802 status = be_cmd_link_status_query(adapter, &lnk_speed,
2803 NULL, vf + 1);
2804 if (!status)
2805 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002806
2807 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002808 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002809 if (status)
2810 goto err;
2811 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002812
2813 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002814 }
2815 return 0;
2816err:
Sathya Perla4c876612013-02-03 20:30:11 +00002817 dev_err(dev, "VF setup failed\n");
2818 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002819 return status;
2820}
2821
Sathya Perla30128032011-11-10 19:17:57 +00002822static void be_setup_init(struct be_adapter *adapter)
2823{
2824 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002825 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002826 adapter->if_handle = -1;
2827 adapter->be3_native = false;
2828 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002829 if (be_physfn(adapter))
2830 adapter->cmd_privileges = MAX_PRIVILEGES;
2831 else
2832 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002833}
2834
/* Determine the MAC address to use for an interface.
 * On return, *mac holds the address; *active_mac tells the caller whether
 * the address is already programmed in the ASIC (so be_setup() only adds
 * a pmac entry when it is not). *pmac_id is updated on the Lancer path.
 * Returns 0 on success or a command status.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* A non-zero perm_addr means the MAC was already established on a
	 * previous setup; reuse the current dev_addr.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2869
/* Populate adapter->max_* resource limits, either from a firmware function
 * profile (non-BEx chips) or from compile-time defaults, then read the
 * SR-IOV capability to learn how many VFs the device supports.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;

	/* Only non-BEx chips expose a queryable function profile */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* NOTE(review): presumably one RX queue is reserved for the
		 * default (non-RSS) queue -- confirm.
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Cannot have more RSS queues than event queues */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		/* No profile: fall back to fixed per-chip defaults */
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In FLEX10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read TotalVFs from the PCI SR-IOV capability, if present */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
2943
Sathya Perla39f1d942012-05-08 19:41:24 +00002944/* Routine to query per function resource limits */
2945static int be_get_config(struct be_adapter *adapter)
2946{
Sathya Perla4c876612013-02-03 20:30:11 +00002947 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002948
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002949 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2950 &adapter->function_mode,
2951 &adapter->function_caps);
2952 if (status)
2953 goto err;
2954
2955 be_get_resources(adapter);
2956
2957 /* primary mac needs 1 pmac entry */
2958 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2959 sizeof(u32), GFP_KERNEL);
2960 if (!adapter->pmac_id) {
2961 status = -ENOMEM;
2962 goto err;
2963 }
2964
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002965err:
2966 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002967}
2968
/* Bring the function fully up: query config, enable MSI-X, create event/
 * completion/MCC queues, create the interface, program the MAC, create TX
 * queues, restore VLAN/RX-mode/flow-control settings, provision VFs and
 * start the periodic worker. On any failure everything created so far is
 * torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	/* Event queues first -- CQs created below attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Never enable a flag the function is not capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if it is not already active in the ASIC */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	/* First-time setup: publish the discovered MAC on the netdev */
	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLANs that were configured before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only issue the set command when FW state differs from ours */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3079
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll/netconsole poll handler: notify each event queue and schedule
 * its NAPI context so completions are processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant trailing "return;" removed */
}
#endif
3095
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Cookie that marks the start of a flash section directory inside a UFI
 * firmware image; matched by get_fsec_info() while scanning the file.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3098
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003099static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003100 const u8 *p, u32 img_start, int image_size,
3101 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003102{
3103 u32 crc_offset;
3104 u8 flashed_crc[4];
3105 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003106
3107 crc_offset = hdr_size + img_start + image_size - 4;
3108
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003109 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003110
3111 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003112 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003113 if (status) {
3114 dev_err(&adapter->pdev->dev,
3115 "could not get crc from flash, not flashing redboot\n");
3116 return false;
3117 }
3118
3119 /*update redboot only if crc does not match*/
3120 if (!memcmp(flashed_crc, p, 4))
3121 return false;
3122 else
3123 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003124}
3125
Sathya Perla306f1342011-08-02 19:57:45 +00003126static bool phy_flashing_required(struct be_adapter *adapter)
3127{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003128 return (adapter->phy.phy_type == TN_8022 &&
3129 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003130}
3131
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003132static bool is_comp_in_ufi(struct be_adapter *adapter,
3133 struct flash_section_info *fsec, int type)
3134{
3135 int i = 0, img_type = 0;
3136 struct flash_section_info_g2 *fsec_g2 = NULL;
3137
Sathya Perlaca34fe32012-11-06 17:48:56 +00003138 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003139 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3140
3141 for (i = 0; i < MAX_FLASH_COMP; i++) {
3142 if (fsec_g2)
3143 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3144 else
3145 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3146
3147 if (img_type == type)
3148 return true;
3149 }
3150 return false;
3151
3152}
3153
3154struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3155 int header_size,
3156 const struct firmware *fw)
3157{
3158 struct flash_section_info *fsec = NULL;
3159 const u8 *p = fw->data;
3160
3161 p += header_size;
3162 while (p < (fw->data + fw->size)) {
3163 fsec = (struct flash_section_info *)p;
3164 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3165 return fsec;
3166 p += 32;
3167 }
3168 return NULL;
3169}
3170
/* Write one firmware component to flash through the DMA command buffer in
 * chunks of up to 32KB. Every chunk except the last is sent with a SAVE
 * operation; the final chunk uses a FLASH operation, which presumably
 * commits the staged image (NOTE(review): confirm FW semantics).
 * Returns 0 on success or the failing command status; an ILLEGAL_IOCTL_REQ
 * on a PHY-FW flash is treated as non-fatal (returns 0).
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* Chunk size is capped at 32KB */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk: FLASH (commit); earlier chunks: SAVE (stage) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			/* FW rejecting a PHY-FW flash is not an error */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3211
/* For BE2 and BE3 */
/* Flash a BE2/BE3 UFI image: pick the per-chip component table (flash
 * offsets, size limits, image types), verify the image's section
 * directory, then write each component present in the UFI via be_flash().
 * Returns 0 on success, -1 on a corrupt/oversized image, or the failing
 * flash status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Component table for BE3 (gen3 flash layout):
	 * { flash offset, operation type, max size, UFI image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Component table for BE2 (gen2 layout; no NCSI/PHY-FW entries) */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components not present in this UFI */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires a minimum controller FW version.
		 * NOTE(review): memcmp is a byte-wise compare, not a real
		 * version compare -- presumably adequate for this version
		 * string format; confirm.
		 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Boot code is only rewritten when its CRC differs */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds check against the firmware file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3321
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003322static int be_flash_skyhawk(struct be_adapter *adapter,
3323 const struct firmware *fw,
3324 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003325{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003326 int status = 0, i, filehdr_size = 0;
3327 int img_offset, img_size, img_optype, redboot;
3328 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3329 const u8 *p = fw->data;
3330 struct flash_section_info *fsec = NULL;
3331
3332 filehdr_size = sizeof(struct flash_file_hdr_g3);
3333 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3334 if (!fsec) {
3335 dev_err(&adapter->pdev->dev,
3336 "Invalid Cookie. UFI corrupted ?\n");
3337 return -1;
3338 }
3339
3340 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3341 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3342 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3343
3344 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3345 case IMAGE_FIRMWARE_iSCSI:
3346 img_optype = OPTYPE_ISCSI_ACTIVE;
3347 break;
3348 case IMAGE_BOOT_CODE:
3349 img_optype = OPTYPE_REDBOOT;
3350 break;
3351 case IMAGE_OPTION_ROM_ISCSI:
3352 img_optype = OPTYPE_BIOS;
3353 break;
3354 case IMAGE_OPTION_ROM_PXE:
3355 img_optype = OPTYPE_PXE_BIOS;
3356 break;
3357 case IMAGE_OPTION_ROM_FCoE:
3358 img_optype = OPTYPE_FCOE_BIOS;
3359 break;
3360 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3361 img_optype = OPTYPE_ISCSI_BACKUP;
3362 break;
3363 case IMAGE_NCSI:
3364 img_optype = OPTYPE_NCSI_FW;
3365 break;
3366 default:
3367 continue;
3368 }
3369
3370 if (img_optype == OPTYPE_REDBOOT) {
3371 redboot = be_flash_redboot(adapter, fw->data,
3372 img_offset, img_size,
3373 filehdr_size + img_hdrs_size);
3374 if (!redboot)
3375 continue;
3376 }
3377
3378 p = fw->data;
3379 p += filehdr_size + img_offset + img_hdrs_size;
3380 if (p + img_size > fw->data + fw->size)
3381 return -1;
3382
3383 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3384 if (status) {
3385 dev_err(&adapter->pdev->dev,
3386 "Flashing section type %d failed.\n",
3387 fsec->fsec_entry[i].type);
3388 return status;
3389 }
3390 }
3391 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003392}
3393
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003394static int lancer_wait_idle(struct be_adapter *adapter)
3395{
3396#define SLIPORT_IDLE_TIMEOUT 30
3397 u32 reg_val;
3398 int status = 0, i;
3399
3400 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3401 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3402 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3403 break;
3404
3405 ssleep(1);
3406 }
3407
3408 if (i == SLIPORT_IDLE_TIMEOUT)
3409 status = -1;
3410
3411 return status;
3412}
3413
3414static int lancer_fw_reset(struct be_adapter *adapter)
3415{
3416 int status = 0;
3417
3418 status = lancer_wait_idle(adapter);
3419 if (status)
3420 return status;
3421
3422 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3423 PHYSDEV_CONTROL_OFFSET);
3424
3425 return status;
3426}
3427
/* Download a firmware image to a Lancer adapter via the write-object
 * command interface.
 *
 * The image is streamed to the firmware in 32KB chunks through a single
 * DMA-coherent bounce buffer, then committed with a zero-length write.
 * Depending on the firmware's response, either an in-band reset is
 * performed to activate the new image or the user is told a reboot is
 * required.
 *
 * Returns 0 on success or a negative/command error code on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* Firmware requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the command header plus one chunk of image data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Image data goes right after the command header in the buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image a chunk at a time; the firmware reports how
	 * many bytes it actually consumed, which drives the advance.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset tells the firmware the download is complete.
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image as the firmware's response dictates */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3524
Sathya Perlaca34fe32012-11-06 17:48:56 +00003525#define UFI_TYPE2 2
3526#define UFI_TYPE3 3
3527#define UFI_TYPE4 4
3528static int be_get_ufi_type(struct be_adapter *adapter,
3529 struct flash_file_hdr_g2 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003530{
3531 if (fhdr == NULL)
3532 goto be_get_ufi_exit;
3533
Sathya Perlaca34fe32012-11-06 17:48:56 +00003534 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3535 return UFI_TYPE4;
3536 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3537 return UFI_TYPE3;
3538 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3539 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003540
3541be_get_ufi_exit:
3542 dev_err(&adapter->pdev->dev,
3543 "UFI and Interface are not compatible for flashing\n");
3544 return -1;
3545}
3546
/* Flash a UFI firmware image on BE2/BE3/Skyhawk adapters.
 *
 * Validates that the image generation matches the chip, then walks the
 * per-image headers and dispatches to the generation-specific flashing
 * routine. A single DMA bounce buffer is shared by all flashrom
 * commands issued underneath.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on an
 * incompatible image, or the flashing routine's error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	/* The g2 header prefix is common to all generations; it is enough
	 * to determine the UFI type.
	 */
	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	/* UFI_TYPE3/4 images carry a g3 header with per-image sub-headers;
	 * only images with imageid == 1 are flashed.
	 */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	/* UFI_TYPE2 (BE2) images have no per-image headers */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3604
3605int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3606{
3607 const struct firmware *fw;
3608 int status;
3609
3610 if (!netif_running(adapter->netdev)) {
3611 dev_err(&adapter->pdev->dev,
3612 "Firmware load not allowed (interface is down)\n");
3613 return -1;
3614 }
3615
3616 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3617 if (status)
3618 goto fw_exit;
3619
3620 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3621
3622 if (lancer_chip(adapter))
3623 status = lancer_fw_download(adapter, fw);
3624 else
3625 status = be_fw_download(adapter, fw);
3626
Ajit Khaparde84517482009-09-04 03:12:16 +00003627fw_exit:
3628 release_firmware(fw);
3629 return status;
3630}
3631
/* Netdev callback table shared by all be2net interfaces (PF and VF). */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV: PF-side configuration of virtual functions */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3651
/* Initialize netdev feature flags, ops, ethtool hooks and per-EQ NAPI
 * contexts before the device is registered.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; VLAN RX strip and
	 * filtering are always on and not user-toggleable.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3683
/* Undo be_map_pci_bars(): release the CSR and doorbell BAR mappings.
 * Either mapping may be absent (e.g. no CSR on non-BEx or VFs).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
3691
/* Return the PCI BAR number that holds the doorbell registers:
 * BAR 0 on Lancer and on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3699
3700static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003701{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003702 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003703 adapter->roce_db.size = 4096;
3704 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3705 db_bar(adapter));
3706 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3707 db_bar(adapter));
3708 }
Parav Pandit045508a2012-03-26 14:27:13 +00003709 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003710}
3711
/* Map the PCI BARs needed by the driver: the CSR window (BEx physical
 * functions only), the doorbell BAR, and the RoCE doorbell window on
 * Skyhawk. Also latches the interface type from the SLI_INTF register.
 * Returns 0 on success or -ENOMEM; partial mappings are undone.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* Only BE2/BE3 physical functions expose the CSR window (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* Releases the CSR mapping taken above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3739
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003740static void be_ctrl_cleanup(struct be_adapter *adapter)
3741{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003742 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003743
3744 be_unmap_pci_bars(adapter);
3745
3746 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003747 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3748 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003749
Sathya Perla5b8821b2011-08-02 19:57:44 +00003750 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003751 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003752 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3753 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003754}
3755
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003756static int be_ctrl_init(struct be_adapter *adapter)
3757{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003758 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3759 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003760 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003761 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003762 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003763
Sathya Perlace66f782012-11-06 17:48:58 +00003764 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3765 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3766 SLI_INTF_FAMILY_SHIFT;
3767 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3768
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003769 status = be_map_pci_bars(adapter);
3770 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003771 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003772
3773 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003774 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3775 mbox_mem_alloc->size,
3776 &mbox_mem_alloc->dma,
3777 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003778 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003779 status = -ENOMEM;
3780 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003781 }
3782 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3783 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3784 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3785 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003786
Sathya Perla5b8821b2011-08-02 19:57:44 +00003787 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3788 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3789 &rx_filter->dma, GFP_KERNEL);
3790 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003791 status = -ENOMEM;
3792 goto free_mbox;
3793 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003794 memset(rx_filter->va, 0, rx_filter->size);
Ivan Vecera29849612010-12-14 05:43:19 +00003795 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003796 spin_lock_init(&adapter->mcc_lock);
3797 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003798
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003799 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003800 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003801 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003802
3803free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003804 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3805 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003806
3807unmap_pci_bars:
3808 be_unmap_pci_bars(adapter);
3809
3810done:
3811 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003812}
3813
3814static void be_stats_cleanup(struct be_adapter *adapter)
3815{
Sathya Perla3abcded2010-10-03 22:12:27 -07003816 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003817
3818 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003819 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3820 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003821}
3822
3823static int be_stats_init(struct be_adapter *adapter)
3824{
Sathya Perla3abcded2010-10-03 22:12:27 -07003825 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003826
Sathya Perlaca34fe32012-11-06 17:48:56 +00003827 if (lancer_chip(adapter))
3828 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3829 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003830 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003831 else
3832 /* BE3 and Skyhawk */
3833 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3834
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003835 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3836 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003837 if (cmd->va == NULL)
3838 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003839 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003840 return 0;
3841}
3842
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe. The ordering matters: the recovery worker must be stopped
 * before the netdev is unregistered, and the firmware must be told
 * we are done before control-path resources are released.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3873
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003874bool be_is_wol_supported(struct be_adapter *adapter)
3875{
3876 return ((adapter->wol_cap & BE_WOL_CAP) &&
3877 !be_is_wol_excluded(adapter)) ? true : false;
3878}
3879
/* Query the firmware's UART trace level via the extended-FAT
 * capabilities command. Lancer does not support this query.
 * Returns the UART debug level, or 0 on any failure (best-effort).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config parameters follow the common response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Scan module 0's trace modes for the UART entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003916
/* Fetch one-time configuration from the firmware during probe:
 * controller attributes, WOL capability, die-temperature poll period
 * and the firmware log level used to seed msg_enable.
 * Returns 0 on success or the attribute-query error code.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3945
/* Recover a Lancer adapter after a SLI port error: wait for the
 * firmware to become ready again, tear down and rebuild the driver
 * state, and re-open the interface if it was running.
 * Returns 0 on successful recovery, otherwise the failing step's error.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear the sticky error flags before re-initializing, so the
	 * command path is usable again.
	 */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* During EEH the failure is expected; only log it in that case */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3982
/* Periodic (1s) worker that watches for adapter errors and, on Lancer,
 * attempts in-driver recovery. The netdev is detached around the
 * recovery attempt so the stack does not use the device meanwhile.
 * Always reschedules itself; be_remove() cancels it.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery owns the device; do not interfere */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4010
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, refreshes HW stats and die temperature, replenishes
 * starved RX rings and adapts per-EQ interrupt delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only issue a new stats request once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature is sampled every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Repost buffers on RX rings that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	/* Adaptive interrupt coalescing update per event queue */
	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4053
Sathya Perla39f1d942012-05-08 19:41:24 +00004054static bool be_reset_required(struct be_adapter *adapter)
4055{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004056 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004057}
4058
Sathya Perlad3791422012-09-28 04:39:44 +00004059static char *mc_name(struct be_adapter *adapter)
4060{
4061 if (adapter->function_mode & FLEX10_MODE)
4062 return "FLEX10";
4063 else if (adapter->function_mode & VNIC_MODE)
4064 return "vNIC";
4065 else if (adapter->function_mode & UMC_ENABLED)
4066 return "UMC";
4067 else
4068 return "";
4069}
4070
/* Label for log messages: physical vs virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4075
/* PCI probe handler: bring up one BE/Lancer PCI function.
 * Enables the device, maps BARs, allocates the netdev/adapter pair,
 * syncs with firmware, creates the HW queues via be_setup() and finally
 * registers the netdev.  Any failure unwinds in reverse order through
 * the goto ladder at the bottom.  Returns 0 or a -ve errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* One netdev carrying the adapter struct as its private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: log the failure but continue probing */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	/* Workers must exist before be_setup(); error paths may cancel them */
	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

/* Error unwind: each label undoes everything initialized above it */
unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4196
/* Legacy PM suspend handler: optionally arm wake-on-LAN, stop the
 * function-recovery worker, quiesce and tear down the HW queues, then
 * power the device down to the state chosen for @state.
 * Undone by be_resume().  Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Worker issues FW cmds; it must be fully stopped before teardown */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Destroy queues/irqs; be_resume() recreates them via be_setup() */
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4220
4221static int be_resume(struct pci_dev *pdev)
4222{
4223 int status = 0;
4224 struct be_adapter *adapter = pci_get_drvdata(pdev);
4225 struct net_device *netdev = adapter->netdev;
4226
4227 netif_device_detach(netdev);
4228
4229 status = pci_enable_device(pdev);
4230 if (status)
4231 return status;
4232
4233 pci_set_power_state(pdev, 0);
4234 pci_restore_state(pdev);
4235
Sathya Perla2243e2e2009-11-22 22:02:03 +00004236 /* tell fw we're ready to fire cmds */
4237 status = be_cmd_fw_init(adapter);
4238 if (status)
4239 return status;
4240
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004241 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004242 if (netif_running(netdev)) {
4243 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004244 be_open(netdev);
4245 rtnl_unlock();
4246 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004247
4248 schedule_delayed_work(&adapter->func_recovery_work,
4249 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004250 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004251
4252 if (adapter->wol)
4253 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004254
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004255 return 0;
4256}
4257
/*
 * An FLR will stop BE from DMAing any data.
 */
/* Shutdown/reboot handler: stop both delayed workers before resetting
 * the function (FLR) so no FW command races the reset, then disable
 * the PCI device.  @adapter may be NULL if probe never completed.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset stops all DMA from the card */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4277
/* EEH callback: a PCI channel error was detected on this function.
 * Marks the adapter errored, stops the recovery worker, quiesces the
 * netdev and tears down the queues.  Returns DISCONNECT for permanent
 * failures, otherwise NEED_RESET so be_eeh_reset() is invoked next.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag checked elsewhere to short-circuit FW cmds during recovery */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4317
4318static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4319{
4320 struct be_adapter *adapter = pci_get_drvdata(pdev);
4321 int status;
4322
4323 dev_info(&adapter->pdev->dev, "EEH reset\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004324 be_clear_all_error(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004325
4326 status = pci_enable_device(pdev);
4327 if (status)
4328 return PCI_ERS_RESULT_DISCONNECT;
4329
4330 pci_set_master(pdev);
4331 pci_set_power_state(pdev, 0);
4332 pci_restore_state(pdev);
4333
4334 /* Check if card is ok and fw is ready */
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004335 dev_info(&adapter->pdev->dev,
4336 "Waiting for FW to be ready after EEH reset\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004337 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004338 if (status)
4339 return PCI_ERS_RESULT_DISCONNECT;
4340
Sathya Perlad6b6d982012-09-05 01:56:48 +00004341 pci_cleanup_aer_uncorrect_error_status(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00004342 return PCI_ERS_RESULT_RECOVERED;
4343}
4344
/* EEH callback: final recovery step after a successful slot reset.
 * Re-initializes firmware state, resets the function, rebuilds the
 * queues and re-attaches the netdev.  On any failure the device is
 * simply left detached (logged, no return value to propagate).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* Clean slate after the EEH event */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Recovery worker was cancelled in be_eeh_err_detected() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4381
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004382static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004383 .error_detected = be_eeh_err_detected,
4384 .slot_reset = be_eeh_reset,
4385 .resume = be_eeh_resume,
4386};
4387
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004388static struct pci_driver be_driver = {
4389 .name = DRV_NAME,
4390 .id_table = be_dev_ids,
4391 .probe = be_probe,
4392 .remove = be_remove,
4393 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004394 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004395 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004396 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004397};
4398
4399static int __init be_init_module(void)
4400{
Joe Perches8e95a202009-12-03 07:58:21 +00004401 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4402 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004403 printk(KERN_WARNING DRV_NAME
4404 " : Module param rx_frag_size must be 2048/4096/8192."
4405 " Using 2048\n");
4406 rx_frag_size = 2048;
4407 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004408
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004409 return pci_register_driver(&be_driver);
4410}
4411module_init(be_init_module);
4412
/* Module exit point: unregister the PCI driver, which invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);