blob: 2886c9b63f9099059d3c8c0fe7a86cddc912854f [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070048 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* One human-readable name per bit of the UE (unrecoverable error) status
 * low register; indexed by bit position when decoding hardware errors.
 * NOTE(review): trailing spaces inside some strings are preserved as-is;
 * they appear in the emitted log text.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* One human-readable name per bit of the UE status high register;
 * trailing "Unknown" entries pad the table to 32 bit positions.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 memset(mem->va, 0, mem->size);
153 return 0;
154}
155
Sathya Perla8788fdc2009-07-27 22:52:03 +0000156static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000160 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000161 return;
162
Sathya Perladb3ea782011-08-22 19:41:52 +0000163 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164 &reg);
165 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000173
Sathya Perladb3ea782011-08-22 19:41:52 +0000174 pci_write_config_dword(adapter->pdev,
175 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700176}
177
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179{
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000183
184 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199 bool arm, bool clear_int, u16 num_popped)
200{
201 u32 val = 0;
202 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000203 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000205
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000206 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000207 return;
208
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209 if (arm)
210 val |= 1 << DB_EQ_REARM_SHIFT;
211 if (clear_int)
212 val |= 1 << DB_EQ_CLR_SHIFT;
213 val |= 1 << DB_EQ_EVNT_SHIFT;
214 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000215 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216}
217
Sathya Perla8788fdc2009-07-27 22:52:03 +0000218void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219{
220 u32 val = 0;
221 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000222 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000224
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000225 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000226 return;
227
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228 if (arm)
229 val |= 1 << DB_CQ_REARM_SHIFT;
230 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232}
233
/* ndo_set_mac_address handler.
 * Validates the requested address, programs it into the HW (adding the
 * new pmac before deleting the old one so traffic is not interrupted),
 * and finally updates netdev->dev_addr.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old entry to delete later */
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* No HW change needed if the address is already current */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* Add the new MAC first ... */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* ... then delete the previously active entry */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
287
Sathya Perlaca34fe32012-11-06 17:48:56 +0000288/* BE2 supports only v0 cmd */
289static void *hw_stats_from_cmd(struct be_adapter *adapter)
290{
291 if (BE2_chip(adapter)) {
292 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294 return &cmd->hw_stats;
295 } else {
296 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 }
300}
301
302/* BE2 supports only v0 cmd */
303static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308 return &hw_stats->erx;
309 } else {
310 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 }
314}
315
/* Copy the v0-format HW stats (BE2) out of the FW response buffer into
 * the driver's generic drv_stats, byte-swapping the whole response from
 * LE first.  v0 has per-port counters plus a few rxf/pmem globals.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan mismatch drops; fold them together */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
364
/* Copy the v1-format HW stats (BE3/Skyhawk) out of the FW response
 * buffer into the driver's generic drv_stats, byte-swapping the whole
 * response from LE first.  v1 carries more counters per port than v0.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a combined mismatch-drop counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
409
/* Copy Lancer per-physical-port (pport) stats out of the FW response
 * buffer into the driver's generic drv_stats, byte-swapping the whole
 * response from LE first.  Lancer reports 64-bit counters; only the
 * low 32 bits (the *_lo fields) are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* the same HW fifo-overflow counter feeds both drv counters */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatch drops are folded together, as in v0 */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000448
Sathya Perla09c1c682011-08-22 19:41:53 +0000449static void accumulate_16bit_val(u32 *acc, u16 val)
450{
451#define lo(x) (x & 0xFFFF)
452#define hi(x) (x & 0xFFFF0000)
453 bool wrapped = val < lo(*acc);
454 u32 newacc = hi(*acc) + val;
455
456 if (wrapped)
457 newacc += 65536;
458 ACCESS_ONCE(*acc) = newacc;
459}
460
/* Parse the most recent FW stats response into adapter->drv_stats,
 * dispatching on chip generation (Lancer vs BE2 vs BE3/Skyhawk), and
 * accumulate the per-RX-queue no-fragment drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					(u16)erx->rx_drops_no_fragments
					[rxo->q.id]);
		}
	}
}
487
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the
 * FW-derived drv_stats into @stats.  Per-queue 64-bit counters are read
 * under a u64_stats seqcount retry loop so 32-bit hosts get torn-free
 * values.  Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
553
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000554void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700555{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700556 struct net_device *netdev = adapter->netdev;
557
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000558 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000559 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000560 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700561 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000562
563 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564 netif_carrier_on(netdev);
565 else
566 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567}
568
Sathya Perla3c8def92011-06-12 20:01:58 +0000569static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000570 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571{
Sathya Perla3c8def92011-06-12 20:01:58 +0000572 struct be_tx_stats *stats = tx_stats(txo);
573
Sathya Perlaab1594e2011-07-25 19:10:15 +0000574 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000575 stats->tx_reqs++;
576 stats->tx_wrbs += wrb_cnt;
577 stats->tx_bytes += copied;
578 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700579 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000580 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000581 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700582}
583
584/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000585static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
586 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700588 int cnt = (skb->len > skb->data_len);
589
590 cnt += skb_shinfo(skb)->nr_frags;
591
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592 /* to account for hdr wrb */
593 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000594 if (lancer_chip(adapter) || !(cnt & 1)) {
595 *dummy = false;
596 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700597 /* add a dummy to make it an even num */
598 cnt++;
599 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000600 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700601 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
602 return cnt;
603}
604
605static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606{
607 wrb->frag_pa_hi = upper_32_bits(addr);
608 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000610 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700611}
612
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000613static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614 struct sk_buff *skb)
615{
616 u8 vlan_prio;
617 u16 vlan_tag;
618
619 vlan_tag = vlan_tx_tag_get(skb);
620 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621 /* If vlan priority provided by OS is NOT in available bmap */
622 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624 adapter->recommended_prio;
625
626 return vlan_tag;
627}
628
Somnath Kotur93040ae2012-06-26 22:32:10 +0000629static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630{
631 return vlan_tx_tag_present(skb) || adapter->pvid;
632}
633
/* Build the per-packet header WRB for @skb: CRC offload is always
 * requested; LSO/LSO6, TCP/UDP checksum offload and vlan insertion are
 * enabled from the skb's offload state.  @wrb_cnt and @len describe the
 * WRB chain that follows this header.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer handles IPv6 GSO without the lso6 hint */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		/* tag may be rewritten to a supported priority */
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
667
/* Undo the DMA mapping described by one TX data WRB.
 * @unmap_single: true if the frag was mapped with dma_map_single()
 *		  (skb linear data); false for page frags.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	/* the WRB was converted to LE for HW; convert back to read it */
	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {	/* dummy WRBs carry frag_len == 0 */
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684
/* Build the WRBs for one skb in @txq: a header WRB (reserved first,
 * filled last once the copied length is known), one data WRB for the
 * linear part (if any), one per page frag, and an optional dummy WRB
 * (HW workaround, requested by the caller via @dummy_wrb).
 * Returns the number of payload bytes DMA-mapped; returns 0 on mapping
 * failure after unmapping everything done so far and rewinding the
 * queue head.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the dma_err path */

	if (skb->len > skb->data_len) {	/* skb has a linear portion */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one data WRB per page frag */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: only the first mapped WRB may be a single mapping
	 * (linear data); every subsequent one is a page mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
750
/* SW-insert the VLAN tag into the pkt payload; used by be_xmit() to work
 * around a HW bug with checksumming of VLAN pkts.
 * Returns NULL if skb_share_check() or __vlan_put_tag() fails.
 * NOTE(review): in both failure cases the original skb appears to be
 * consumed by the callee, so the caller must not touch it — confirm
 * against the __vlan_put_tag() contract for this kernel version.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
		struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		skb = __vlan_put_tag(skb, vlan_tag);
		/* tag now lives in the payload; clear the out-of-band tag */
		if (skb)
			skb->vlan_tci = 0;
	}

	return skb;
}
769
/* ndo_start_xmit handler: applies HW-bug workarounds (short-IPv4-pkt trim,
 * SW VLAN insertion), builds the WRBs, and rings the TX doorbell.
 * Always returns NETDEV_TX_OK; pkts that cannot be mapped are dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim off the padding so HW leaves tot_len alone */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* read gso_segs before ringing the doorbell: the skb may be
		 * freed by a TX completion as soon as HW sees it
		 */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
835
836static int be_change_mtu(struct net_device *netdev, int new_mtu)
837{
838 struct be_adapter *adapter = netdev_priv(netdev);
839 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000840 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
841 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700842 dev_info(&adapter->pdev->dev,
843 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000844 BE_MIN_MTU,
845 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846 return -EINVAL;
847 }
848 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
849 netdev->mtu, new_mtu);
850 netdev->mtu = new_mtu;
851 return 0;
852}
853
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL vid table + promisc flag = accept all vlans in HW */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				NULL, 0, 1, 1);
	return status;
}
893
/* ndo_vlan_rx_add_vid handler: marks @vid in the SW table and re-programs
 * the HW VLAN filters via be_vid_config(). Rolls the SW entry back if the
 * FW cmd fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* non-Lancer VFs are not allowed to program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* rollback on failure */
ret:
	return status;
}
919
/* ndo_vlan_rx_kill_vid handler: clears @vid from the SW table and
 * re-programs the HW VLAN filters. Restores the SW entry if the FW cmd
 * fails.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* non-Lancer VFs are not allowed to program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* rollback on failure */
ret:
	return status;
}
945
/* ndo_set_rx_mode handler: programs the HW promiscuous, multicast and
 * secondary-unicast filters to match the netdev's current flags and
 * address lists, falling back to (mcast-)promisc when HW filters run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters skipped while promisc */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* flush all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more UC addrs than HW pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1007
/* ndo_set_vf_mac handler: programs @mac as VF @vf's MAC address.
 * On Lancer the MAC is set via the FW MAC-list cmd (after deleting the
 * currently active MAC, if any); on other chips the old pmac entry is
 * deleted and the new one added.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						&pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): a pmac_del failure here is overwritten by the
		 * pmac_add status below and thus goes unreported — confirm
		 * that is intended.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
				vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
				&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* mirror the programmed MAC in the SW per-VF config */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1047
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001048static int be_get_vf_config(struct net_device *netdev, int vf,
1049 struct ifla_vf_info *vi)
1050{
1051 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001052 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001053
Sathya Perla11ac75e2011-12-13 00:58:50 +00001054 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001055 return -EPERM;
1056
Sathya Perla11ac75e2011-12-13 00:58:50 +00001057 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001058 return -EINVAL;
1059
1060 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001061 vi->tx_rate = vf_cfg->tx_rate;
1062 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001063 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001064 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001065
1066 return 0;
1067}
1068
/* ndo_set_vf_vlan handler: enables transparent VLAN tagging of VF @vf's
 * traffic with @vlan, or (when @vlan == 0) resets tagging back to the
 * VF's default vid.
 * NOTE(review): the @qos argument is ignored — confirm that is intended.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
				adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1103
Ajit Khapardee1d18732010-07-23 01:52:13 +00001104static int be_set_vf_tx_rate(struct net_device *netdev,
1105 int vf, int rate)
1106{
1107 struct be_adapter *adapter = netdev_priv(netdev);
1108 int status = 0;
1109
Sathya Perla11ac75e2011-12-13 00:58:50 +00001110 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001111 return -EPERM;
1112
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001113 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001114 return -EINVAL;
1115
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001116 if (rate < 100 || rate > 10000) {
1117 dev_err(&adapter->pdev->dev,
1118 "tx rate must be between 100 and 10000 Mbps\n");
1119 return -EINVAL;
1120 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001121
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001122 if (lancer_chip(adapter))
1123 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1124 else
1125 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001126
1127 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001128 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001129 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001130 else
1131 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001132 return status;
1133}
1134
/* Count this PF's VFs by walking the PCI device list.
 * @vf_state: ASSIGNED returns only the VFs currently assigned to a guest
 * (PCI_DEV_FLAGS_ASSIGNED); any other value returns the total VF count.
 * Returns 0 when the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but never used below —
	 * confirm whether they are still needed.
	 */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		/* count only virtfns whose physfn is this adapter's PF */
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1158
/* Re-evaluate and program the EQ delay (interrupt moderation) for @eqo.
 * With AIC disabled the statically configured eqo->eqd is used; with AIC
 * enabled the delay is derived from the RX pkt rate of the corresponding
 * RX queue, recomputed at most once per second. The FW cmd is issued
 * only when the value actually changes.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;	/* static, user-configured delay */
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX rate to measure */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* snapshot the pkt counter consistently w.r.t. the writer side */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* scale pps into a delay value, clamped to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1207
/* Accumulate per-RX-queue SW stats for one RX completion; the writes are
 * bracketed by the u64_stats sync so readers see consistent 64-bit values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1223
Sathya Perla2e588f82011-03-11 02:49:26 +00001224static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001225{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001226 /* L4 checksum is not reliable for non TCP/UDP packets.
1227 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001228 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1229 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001230}
1231
/* Consume the page-info entry for the RX frag at @frag_idx.
 * A single DMA mapping ("big page") is shared by consecutive frags; it is
 * unmapped only when this entry is flagged as the page's last user.
 * Decrements the rxq used count; the caller owns the returned page ref.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1252
/* Throwaway the data in the Rx completion: release the page ref of every
 * frag belonging to @rxcp and clear its page-info entry.
 */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		/* advance to the next frag slot, wrapping at queue length */
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
1268
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment is copied (fully or just the
 * Ethernet header) into the skb's linear area, and all remaining RX
 * fragments are attached as page frags. Fragments that share a
 * physical page are coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Clearing ->page marks this producer slot as consumable again */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * per-frag page reference taken at post time.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1345
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted RX fragments and hand it to
 * the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: count the drop and discard this completion's
		 * RX buffers so the ring does not hold on to them.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev advertises RXCSUM */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* rxo's index within adapter->rx_obj[] identifies the RX queue */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1379
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted RX fragments directly to the napi frag skb and pass
 * it up via napi_gro_frags() (no copy into a linear area).
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at (u16)-1; the i == 0 branch below always increments it
	 * to 0 before frags[j] is first written.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the duplicate
			 * page reference taken at post time.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		/* Reset the producer slot so it can be re-posted */
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for csum-verified frames */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1435
/* Decode a v1-format RX completion entry (used when adapter->be3_native
 * is set - see be_rx_compl_get()) into the HW-independent rxcp struct.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001467
/* Decode a v0-format (legacy) RX completion entry into the
 * HW-independent rxcp struct; same fields as the v1 parser.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1499
/* Fetch and parse the next valid RX completion from rxo's CQ, or return
 * NULL if none is pending. The entry is converted to CPU endianness and
 * its valid bit is cleared so it is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit was seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the tag only for the PVID or for vids the driver
		 * has not configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1539
Eric Dumazet1829b082011-03-01 05:48:12 +00001540static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001543
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001545 gfp |= __GFP_COMP;
1546 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547}
1548
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 *
 * A "big page" is carved into rx_frag_size chunks; each chunk becomes one
 * RX descriptor. Subsequent chunks of the same page take an extra page
 * reference (get_page) so each posted frag owns a reference. @gfp selects
 * the allocation context (atomic vs sleeping).
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Stop early if a slot still holds a page (ring full at that point) */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Another frag from the same big page: take an extra
			 * page reference for this frag.
			 */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_page_user marks the frag that should unmap
			 * the DMA mapping for the whole big page.
			 */
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1610
/* Fetch the next pending TX completion from @tx_cq, or NULL if none.
 * The entry is converted to CPU endianness in place and its valid bit
 * is cleared so it is consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit was seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1626
/* Reclaim the TX wrbs of the skb whose descriptors end at @last_index:
 * unmap each frag wrb and free the skb. Returns the number of wrbs
 * consumed (including the header wrb) so the caller can adjust
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the skb-head mapping is unmapped only once, together with
		 * the first data wrb (and only if the skb has linear data)
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1658
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001659/* Return the number of events in the event queue */
1660static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001661{
1662 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001663 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001664
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001665 do {
1666 eqe = queue_tail_node(&eqo->q);
1667 if (eqe->evt == 0)
1668 break;
1669
1670 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001671 eqe->evt = 0;
1672 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001673 queue_tail_inc(&eqo->q);
1674 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001675
1676 return num;
1677}
1678
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001679/* Leaves the EQ is disarmed state */
1680static void be_eq_clean(struct be_eq_obj *eqo)
1681{
1682 int num = events_get(eqo);
1683
1684 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1685}
1686
/* Drain rxo's completion queue and release all posted RX buffers.
 * Called during queue teardown, after RX has been stopped in HW.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1737
/* Drain TX completions on all TX queues, waiting up to ~200ms for them
 * to arrive; any posted wrbs that never complete are then reclaimed and
 * their skbs freed so the queues end up empty.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* reset per-queue accumulators for the
				 * next queue / iteration */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* compute where this skb's wrbs end so
			 * be_tx_compl_process() can walk them */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1796
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001797static void be_evt_queues_destroy(struct be_adapter *adapter)
1798{
1799 struct be_eq_obj *eqo;
1800 int i;
1801
1802 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001803 if (eqo->q.created) {
1804 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001805 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001806 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001807 be_queue_free(adapter, &eqo->q);
1808 }
1809}
1810
/* Create one event queue per irq vector. Returns 0 on success or the
 * first failing status; partially created queues are left for the
 * caller to release (be_evt_queues_destroy handles both created and
 * merely allocated queues).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Initialize per-EQ state before issuing FW commands */
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1838
Sathya Perla5fb379e2009-06-18 00:02:59 +00001839static void be_mcc_queues_destroy(struct be_adapter *adapter)
1840{
1841 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001842
Sathya Perla8788fdc2009-07-27 22:52:03 +00001843 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001844 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001845 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001846 be_queue_free(adapter, q);
1847
Sathya Perla8788fdc2009-07-27 22:52:03 +00001848 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001849 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001850 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001851 be_queue_free(adapter, q);
1852}
1853
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and the MCC queue on top of it;
 * on any failure the goto chain unwinds whatever was set up so far
 * and -1 is returned.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1886
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887static void be_tx_queues_destroy(struct be_adapter *adapter)
1888{
1889 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001890 struct be_tx_obj *txo;
1891 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001892
Sathya Perla3c8def92011-06-12 20:01:58 +00001893 for_all_tx_queues(adapter, txo, i) {
1894 q = &txo->q;
1895 if (q->created)
1896 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1897 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898
Sathya Perla3c8def92011-06-12 20:01:58 +00001899 q = &txo->cq;
1900 if (q->created)
1901 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1902 be_queue_free(adapter, q);
1903 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904}
1905
Sathya Perladafc0fe2011-10-24 02:45:02 +00001906static int be_num_txqs_want(struct be_adapter *adapter)
1907{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001908 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1909 be_is_mc(adapter) ||
1910 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00001911 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00001912 return 1;
1913 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001914 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001915}
1916
/* Create one completion queue per TX queue. Also updates the netdev's
 * real TX queue count (under rtnl) when it differs from MAX_TX_QS.
 * Returns 0 on success or the first failing status; the caller is
 * expected to clean up partially created queues.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1949
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001950static int be_tx_qs_create(struct be_adapter *adapter)
1951{
1952 struct be_tx_obj *txo;
1953 int i, status;
1954
1955 for_all_tx_queues(adapter, txo, i) {
1956 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1957 sizeof(struct be_eth_wrb));
1958 if (status)
1959 return status;
1960
1961 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1962 if (status)
1963 return status;
1964 }
1965
Sathya Perlad3791422012-09-28 04:39:44 +00001966 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1967 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001968 return 0;
1969}
1970
1971static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972{
1973 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001974 struct be_rx_obj *rxo;
1975 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001976
Sathya Perla3abcded2010-10-03 22:12:27 -07001977 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001978 q = &rxo->cq;
1979 if (q->created)
1980 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1981 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001982 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001983}
1984
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001985static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001986{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001987 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001988 struct be_rx_obj *rxo;
1989 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001990
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001991 /* We'll create as many RSS rings as there are irqs.
1992 * But when there's only one irq there's no use creating RSS rings
1993 */
1994 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1995 num_irqs(adapter) + 1 : 1;
Sathya Perla7f640062012-06-05 19:37:20 +00001996 if (adapter->num_rx_qs != MAX_RX_QS) {
1997 rtnl_lock();
1998 netif_set_real_num_rx_queues(adapter->netdev,
1999 adapter->num_rx_qs);
2000 rtnl_unlock();
2001 }
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002002
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002004 for_all_rx_queues(adapter, rxo, i) {
2005 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002006 cq = &rxo->cq;
2007 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2008 sizeof(struct be_eth_rx_compl));
2009 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002010 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002011
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002012 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2013 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002014 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002015 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002016 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017
Sathya Perlad3791422012-09-28 04:39:44 +00002018 dev_info(&adapter->pdev->dev,
2019 "created %d RSS queue(s) and 1 default RX queue\n",
2020 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002021 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002022}
2023
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002024static irqreturn_t be_intx(int irq, void *dev)
2025{
Sathya Perlae49cc342012-11-27 19:50:02 +00002026 struct be_eq_obj *eqo = dev;
2027 struct be_adapter *adapter = eqo->adapter;
2028 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002029
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002030 /* IRQ is not expected when NAPI is scheduled as the EQ
2031 * will not be armed.
2032 * But, this can happen on Lancer INTx where it takes
2033 * a while to de-assert INTx or in BE2 where occasionaly
2034 * an interrupt may be raised even when EQ is unarmed.
2035 * If NAPI is already scheduled, then counting & notifying
2036 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002037 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002038 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002039 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002040 __napi_schedule(&eqo->napi);
2041 if (num_evts)
2042 eqo->spurious_intr = 0;
2043 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002044 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002045
2046 /* Return IRQ_HANDLED only for the the first spurious intr
2047 * after a valid intr to stop the kernel from branding
2048 * this irq as a bad one!
2049 */
2050 if (num_evts || eqo->spurious_intr++ == 0)
2051 return IRQ_HANDLED;
2052 else
2053 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002054}
2055
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002056static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002057{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002058 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002059
Sathya Perla0b545a62012-11-23 00:27:18 +00002060 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2061 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002062 return IRQ_HANDLED;
2063}
2064
Sathya Perla2e588f82011-03-11 02:49:26 +00002065static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002066{
Sathya Perla2e588f82011-03-11 02:49:26 +00002067 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002068}
2069
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002070static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2071 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002072{
Sathya Perla3abcded2010-10-03 22:12:27 -07002073 struct be_adapter *adapter = rxo->adapter;
2074 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002075 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002076 u32 work_done;
2077
2078 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002079 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002080 if (!rxcp)
2081 break;
2082
Sathya Perla12004ae2011-08-02 19:57:46 +00002083 /* Is it a flush compl that has no data */
2084 if (unlikely(rxcp->num_rcvd == 0))
2085 goto loop_continue;
2086
2087 /* Discard compl with partial DMA Lancer B0 */
2088 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002089 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002090 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002091 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002092
Sathya Perla12004ae2011-08-02 19:57:46 +00002093 /* On BE drop pkts that arrive due to imperfect filtering in
2094 * promiscuous mode on some skews
2095 */
2096 if (unlikely(rxcp->port != adapter->port_num &&
2097 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002098 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002099 goto loop_continue;
2100 }
2101
2102 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002103 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002104 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002105 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002106loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002107 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002108 }
2109
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002110 if (work_done) {
2111 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002112
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002113 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2114 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002115 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002116
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117 return work_done;
2118}
2119
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002120static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2121 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002123 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002124 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002125
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002126 for (work_done = 0; work_done < budget; work_done++) {
2127 txcp = be_tx_compl_get(&txo->cq);
2128 if (!txcp)
2129 break;
2130 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002131 AMAP_GET_BITS(struct amap_eth_tx_compl,
2132 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002133 }
2134
2135 if (work_done) {
2136 be_cq_notify(adapter, txo->cq.id, true, work_done);
2137 atomic_sub(num_wrbs, &txo->q.used);
2138
2139 /* As Tx wrbs have been freed up, wake up netdev queue
2140 * if it was stopped due to lack of tx wrbs. */
2141 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2142 atomic_read(&txo->q.used) < txo->q.len / 2) {
2143 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002144 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002145
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002146 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2147 tx_stats(txo)->tx_compl += work_done;
2148 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2149 }
2150 return (work_done < budget); /* Done */
2151}
Sathya Perla3c8def92011-06-12 20:01:58 +00002152
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002153int be_poll(struct napi_struct *napi, int budget)
2154{
2155 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2156 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002157 int max_work = 0, work, i, num_evts;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002158 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002159
Sathya Perla0b545a62012-11-23 00:27:18 +00002160 num_evts = events_get(eqo);
2161
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002162 /* Process all TXQs serviced by this EQ */
2163 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2164 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2165 eqo->tx_budget, i);
2166 if (!tx_done)
2167 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002168 }
2169
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002170 /* This loop will iterate twice for EQ0 in which
2171 * completions of the last RXQ (default one) are also processed
2172 * For other EQs the loop iterates only once
2173 */
2174 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2175 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2176 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002177 }
2178
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002179 if (is_mcc_eqo(eqo))
2180 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002181
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002182 if (max_work < budget) {
2183 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002184 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002185 } else {
2186 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002187 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002188 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002189 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002190}
2191
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002192void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002193{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002194 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2195 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002196 u32 i;
2197
Sathya Perlad23e9462012-12-17 19:38:51 +00002198 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002199 return;
2200
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002201 if (lancer_chip(adapter)) {
2202 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2203 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2204 sliport_err1 = ioread32(adapter->db +
2205 SLIPORT_ERROR1_OFFSET);
2206 sliport_err2 = ioread32(adapter->db +
2207 SLIPORT_ERROR2_OFFSET);
2208 }
2209 } else {
2210 pci_read_config_dword(adapter->pdev,
2211 PCICFG_UE_STATUS_LOW, &ue_lo);
2212 pci_read_config_dword(adapter->pdev,
2213 PCICFG_UE_STATUS_HIGH, &ue_hi);
2214 pci_read_config_dword(adapter->pdev,
2215 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2216 pci_read_config_dword(adapter->pdev,
2217 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002219 ue_lo = (ue_lo & ~ue_lo_mask);
2220 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002221 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002222
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002223 /* On certain platforms BE hardware can indicate spurious UEs.
2224 * Allow the h/w to stop working completely in case of a real UE.
2225 * Hence not setting the hw_error for UE detection.
2226 */
2227 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002228 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002229 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002230 "Error detected in the card\n");
2231 }
2232
2233 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2234 dev_err(&adapter->pdev->dev,
2235 "ERR: sliport status 0x%x\n", sliport_status);
2236 dev_err(&adapter->pdev->dev,
2237 "ERR: sliport error1 0x%x\n", sliport_err1);
2238 dev_err(&adapter->pdev->dev,
2239 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002240 }
2241
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002242 if (ue_lo) {
2243 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2244 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002245 dev_err(&adapter->pdev->dev,
2246 "UE: %s bit set\n", ue_status_low_desc[i]);
2247 }
2248 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002249
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002250 if (ue_hi) {
2251 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2252 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002253 dev_err(&adapter->pdev->dev,
2254 "UE: %s bit set\n", ue_status_hi_desc[i]);
2255 }
2256 }
2257
2258}
2259
Sathya Perla8d56ff12009-11-22 22:02:26 +00002260static void be_msix_disable(struct be_adapter *adapter)
2261{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002262 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002263 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002264 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002265 }
2266}
2267
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002268static uint be_num_rss_want(struct be_adapter *adapter)
2269{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002270 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002271
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002272 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002273 (lancer_chip(adapter) ||
2274 (!sriov_want(adapter) && be_physfn(adapter)))) {
2275 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002276 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2277 }
2278 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002279}
2280
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281static void be_msix_enable(struct be_adapter *adapter)
2282{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002283#define BE_MIN_MSIX_VECTORS 1
Parav Pandit045508a2012-03-26 14:27:13 +00002284 int i, status, num_vec, num_roce_vec = 0;
Sathya Perlad3791422012-09-28 04:39:44 +00002285 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002287 /* If RSS queues are not used, need a vec for default RX Q */
2288 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
Parav Pandit045508a2012-03-26 14:27:13 +00002289 if (be_roce_supported(adapter)) {
2290 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2291 (num_online_cpus() + 1));
2292 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2293 num_vec += num_roce_vec;
2294 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2295 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002296 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002297
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002298 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299 adapter->msix_entries[i].entry = i;
2300
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002301 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002302 if (status == 0) {
2303 goto done;
2304 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002305 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002306 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002307 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002308 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002309 }
Sathya Perlad3791422012-09-28 04:39:44 +00002310
2311 dev_warn(dev, "MSIx enable failed\n");
Sathya Perla3abcded2010-10-03 22:12:27 -07002312 return;
2313done:
Parav Pandit045508a2012-03-26 14:27:13 +00002314 if (be_roce_supported(adapter)) {
2315 if (num_vec > num_roce_vec) {
2316 adapter->num_msix_vec = num_vec - num_roce_vec;
2317 adapter->num_msix_roce_vec =
2318 num_vec - adapter->num_msix_vec;
2319 } else {
2320 adapter->num_msix_vec = num_vec;
2321 adapter->num_msix_roce_vec = 0;
2322 }
2323 } else
2324 adapter->num_msix_vec = num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002325 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002326 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002327}
2328
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002329static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002330 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002331{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002332 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002333}
2334
/* Register one MSI-x IRQ handler per event queue. On failure, unwinds the
 * IRQs registered so far and disables MSI-x entirely so the caller can
 * fall back to INTx. Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* free only the IRQs that were successfully requested (0..i-1) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2358
/* Register interrupt handlers: prefer MSI-x; on failure fall back to a
 * shared INTx IRQ on the PF (VFs support only MSI-x). Sets
 * adapter->isr_registered on success. Returns 0 or a request_irq() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2386
2387static void be_irq_unregister(struct be_adapter *adapter)
2388{
2389 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002390 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002391 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002392
2393 if (!adapter->isr_registered)
2394 return;
2395
2396 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002397 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002398 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002399 goto done;
2400 }
2401
2402 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002403 for_all_evt_queues(adapter, eqo, i)
2404 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002405
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002406done:
2407 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002408}
2409
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002410static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002411{
2412 struct be_queue_info *q;
2413 struct be_rx_obj *rxo;
2414 int i;
2415
2416 for_all_rx_queues(adapter, rxo, i) {
2417 q = &rxo->q;
2418 if (q->created) {
2419 be_cmd_rxq_destroy(adapter, q);
2420 /* After the rxq is invalidated, wait for a grace time
2421 * of 1ms for all dma to end and the flush compl to
2422 * arrive
2423 */
2424 mdelay(1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002425 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002426 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002427 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002428 }
2429}
2430
Sathya Perla889cd4b2010-05-30 23:33:45 +00002431static int be_close(struct net_device *netdev)
2432{
2433 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002434 struct be_eq_obj *eqo;
2435 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002436
Parav Pandit045508a2012-03-26 14:27:13 +00002437 be_roce_dev_close(adapter);
2438
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002439 if (!lancer_chip(adapter))
2440 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002441
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002442 for_all_evt_queues(adapter, eqo, i)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002443 napi_disable(&eqo->napi);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002444
2445 be_async_mcc_disable(adapter);
2446
2447 /* Wait for all pending tx completions to arrive so that
2448 * all tx skbs are freed.
2449 */
2450 be_tx_compl_clean(adapter);
2451
2452 be_rx_qs_destroy(adapter);
2453
2454 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002455 if (msix_enabled(adapter))
2456 synchronize_irq(be_msix_vec_get(adapter, eqo));
2457 else
2458 synchronize_irq(netdev->irq);
2459 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002460 }
2461
Sathya Perla889cd4b2010-05-30 23:33:45 +00002462 be_irq_unregister(adapter);
2463
Sathya Perla482c9e72011-06-29 23:33:17 +00002464 return 0;
2465}
2466
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002467static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002468{
2469 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002470 int rc, i, j;
2471 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002472
2473 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002474 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2475 sizeof(struct be_eth_rx_d));
2476 if (rc)
2477 return rc;
2478 }
2479
2480 /* The FW would like the default RXQ to be created first */
2481 rxo = default_rxo(adapter);
2482 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2483 adapter->if_handle, false, &rxo->rss_id);
2484 if (rc)
2485 return rc;
2486
2487 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002488 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002489 rx_frag_size, adapter->if_handle,
2490 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002491 if (rc)
2492 return rc;
2493 }
2494
2495 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002496 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2497 for_all_rss_queues(adapter, rxo, i) {
2498 if ((j + i) >= 128)
2499 break;
2500 rsstable[j + i] = rxo->rss_id;
2501 }
2502 }
2503 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002504 if (rc)
2505 return rc;
2506 }
2507
2508 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002509 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002510 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002511 return 0;
2512}
2513
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002514static int be_open(struct net_device *netdev)
2515{
2516 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002517 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002518 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002519 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002520 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002521 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002522
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002523 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002524 if (status)
2525 goto err;
2526
Sathya Perla5fb379e2009-06-18 00:02:59 +00002527 be_irq_register(adapter);
2528
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002529 if (!lancer_chip(adapter))
2530 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002531
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002532 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002533 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002534
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002535 for_all_tx_queues(adapter, txo, i)
2536 be_cq_notify(adapter, txo->cq.id, true, 0);
2537
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002538 be_async_mcc_enable(adapter);
2539
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002540 for_all_evt_queues(adapter, eqo, i) {
2541 napi_enable(&eqo->napi);
2542 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2543 }
2544
Sathya Perla323ff712012-09-28 04:39:43 +00002545 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002546 if (!status)
2547 be_link_status_update(adapter, link_status);
2548
Parav Pandit045508a2012-03-26 14:27:13 +00002549 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002550 return 0;
2551err:
2552 be_close(adapter->netdev);
2553 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002554}
2555
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002556static int be_setup_wol(struct be_adapter *adapter, bool enable)
2557{
2558 struct be_dma_mem cmd;
2559 int status = 0;
2560 u8 mac[ETH_ALEN];
2561
2562 memset(mac, 0, ETH_ALEN);
2563
2564 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002565 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2566 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002567 if (cmd.va == NULL)
2568 return -1;
2569 memset(cmd.va, 0, cmd.size);
2570
2571 if (enable) {
2572 status = pci_write_config_dword(adapter->pdev,
2573 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2574 if (status) {
2575 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002576 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002577 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2578 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002579 return status;
2580 }
2581 status = be_cmd_enable_magic_wol(adapter,
2582 adapter->netdev->dev_addr, &cmd);
2583 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2584 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2585 } else {
2586 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2587 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2588 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2589 }
2590
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002591 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002592 return status;
2593}
2594
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002595/*
2596 * Generate a seed MAC address from the PF MAC Address using jhash.
2597 * MAC Address for VFs are assigned incrementally starting from the seed.
2598 * These addresses are programmed in the ASIC by the PF and the VF driver
2599 * queries for the MAC address during its probe.
2600 */
Sathya Perla4c876612013-02-03 20:30:11 +00002601static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002602{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002603 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002604 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002605 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002606 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002607
2608 be_vf_eth_addr_generate(adapter, mac);
2609
Sathya Perla11ac75e2011-12-13 00:58:50 +00002610 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002611 if (lancer_chip(adapter)) {
2612 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2613 } else {
2614 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002615 vf_cfg->if_handle,
2616 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002617 }
2618
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002619 if (status)
2620 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002621 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002622 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002623 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002624
2625 mac[5] += 1;
2626 }
2627 return status;
2628}
2629
Sathya Perla4c876612013-02-03 20:30:11 +00002630static int be_vfs_mac_query(struct be_adapter *adapter)
2631{
2632 int status, vf;
2633 u8 mac[ETH_ALEN];
2634 struct be_vf_cfg *vf_cfg;
2635 bool active;
2636
2637 for_all_vfs(adapter, vf_cfg, vf) {
2638 be_cmd_get_mac_from_list(adapter, mac, &active,
2639 &vf_cfg->pmac_id, 0);
2640
2641 status = be_cmd_mac_addr_query(adapter, mac, false,
2642 vf_cfg->if_handle, 0);
2643 if (status)
2644 return status;
2645 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2646 }
2647 return 0;
2648}
2649
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002650static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002651{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002652 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002653 u32 vf;
2654
Sathya Perla39f1d942012-05-08 19:41:24 +00002655 if (be_find_vfs(adapter, ASSIGNED)) {
Sathya Perla4c876612013-02-03 20:30:11 +00002656 dev_warn(&adapter->pdev->dev,
2657 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00002658 goto done;
2659 }
2660
Sathya Perla11ac75e2011-12-13 00:58:50 +00002661 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002662 if (lancer_chip(adapter))
2663 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2664 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002665 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2666 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002667
Sathya Perla11ac75e2011-12-13 00:58:50 +00002668 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2669 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002670 pci_disable_sriov(adapter->pdev);
2671done:
2672 kfree(adapter->vf_cfg);
2673 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002674}
2675
Sathya Perlaa54769f2011-10-24 02:45:00 +00002676static int be_clear(struct be_adapter *adapter)
2677{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002678 int i = 1;
2679
Sathya Perla191eb752012-02-23 18:50:13 +00002680 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2681 cancel_delayed_work_sync(&adapter->work);
2682 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2683 }
2684
Sathya Perla11ac75e2011-12-13 00:58:50 +00002685 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002686 be_vf_clear(adapter);
2687
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002688 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2689 be_cmd_pmac_del(adapter, adapter->if_handle,
2690 adapter->pmac_id[i], 0);
2691
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002692 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002693
2694 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002695 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002696 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002697 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002698
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002699 kfree(adapter->pmac_id);
2700 adapter->pmac_id = NULL;
2701
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002702 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002703 return 0;
2704}
2705
Sathya Perla4c876612013-02-03 20:30:11 +00002706static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002707{
Sathya Perla4c876612013-02-03 20:30:11 +00002708 struct be_vf_cfg *vf_cfg;
2709 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002710 int status;
2711
Sathya Perla4c876612013-02-03 20:30:11 +00002712 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2713 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002714
Sathya Perla4c876612013-02-03 20:30:11 +00002715 for_all_vfs(adapter, vf_cfg, vf) {
2716 if (!BE3_chip(adapter))
2717 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2718
2719 /* If a FW profile exists, then cap_flags are updated */
2720 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2721 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2722 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2723 &vf_cfg->if_handle, vf + 1);
2724 if (status)
2725 goto err;
2726 }
2727err:
2728 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002729}
2730
Sathya Perla39f1d942012-05-08 19:41:24 +00002731static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002732{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002733 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002734 int vf;
2735
Sathya Perla39f1d942012-05-08 19:41:24 +00002736 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2737 GFP_KERNEL);
2738 if (!adapter->vf_cfg)
2739 return -ENOMEM;
2740
Sathya Perla11ac75e2011-12-13 00:58:50 +00002741 for_all_vfs(adapter, vf_cfg, vf) {
2742 vf_cfg->if_handle = -1;
2743 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002744 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002745 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002746}
2747
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002748static int be_vf_setup(struct be_adapter *adapter)
2749{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002750 struct be_vf_cfg *vf_cfg;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002751 u16 def_vlan, lnk_speed;
Sathya Perla4c876612013-02-03 20:30:11 +00002752 int status, old_vfs, vf;
2753 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002754
Sathya Perla4c876612013-02-03 20:30:11 +00002755 old_vfs = be_find_vfs(adapter, ENABLED);
2756 if (old_vfs) {
2757 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2758 if (old_vfs != num_vfs)
2759 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2760 adapter->num_vfs = old_vfs;
Sathya Perla39f1d942012-05-08 19:41:24 +00002761 } else {
Sathya Perla4c876612013-02-03 20:30:11 +00002762 if (num_vfs > adapter->dev_num_vfs)
2763 dev_info(dev, "Device supports %d VFs and not %d\n",
2764 adapter->dev_num_vfs, num_vfs);
2765 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2766
2767 status = pci_enable_sriov(adapter->pdev, num_vfs);
2768 if (status) {
2769 dev_err(dev, "SRIOV enable failed\n");
2770 adapter->num_vfs = 0;
2771 return 0;
2772 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002773 }
2774
2775 status = be_vf_setup_init(adapter);
2776 if (status)
2777 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002778
Sathya Perla4c876612013-02-03 20:30:11 +00002779 if (old_vfs) {
2780 for_all_vfs(adapter, vf_cfg, vf) {
2781 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2782 if (status)
2783 goto err;
2784 }
2785 } else {
2786 status = be_vfs_if_create(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002787 if (status)
2788 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002789 }
2790
Sathya Perla4c876612013-02-03 20:30:11 +00002791 if (old_vfs) {
2792 status = be_vfs_mac_query(adapter);
2793 if (status)
2794 goto err;
2795 } else {
Sathya Perla39f1d942012-05-08 19:41:24 +00002796 status = be_vf_eth_addr_config(adapter);
2797 if (status)
2798 goto err;
2799 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002800
Sathya Perla11ac75e2011-12-13 00:58:50 +00002801 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla4c876612013-02-03 20:30:11 +00002802 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2803 * Allow full available bandwidth
2804 */
2805 if (BE3_chip(adapter) && !old_vfs)
2806 be_cmd_set_qos(adapter, 1000, vf+1);
2807
2808 status = be_cmd_link_status_query(adapter, &lnk_speed,
2809 NULL, vf + 1);
2810 if (!status)
2811 vf_cfg->tx_rate = lnk_speed;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002812
2813 status = be_cmd_get_hsw_config(adapter, &def_vlan,
Sathya Perla4c876612013-02-03 20:30:11 +00002814 vf + 1, vf_cfg->if_handle);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002815 if (status)
2816 goto err;
2817 vf_cfg->def_vid = def_vlan;
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00002818
2819 be_cmd_enable_vf(adapter, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002820 }
2821 return 0;
2822err:
Sathya Perla4c876612013-02-03 20:30:11 +00002823 dev_err(dev, "VF setup failed\n");
2824 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002825 return status;
2826}
2827
Sathya Perla30128032011-11-10 19:17:57 +00002828static void be_setup_init(struct be_adapter *adapter)
2829{
2830 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002831 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002832 adapter->if_handle = -1;
2833 adapter->be3_native = false;
2834 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002835 if (be_physfn(adapter))
2836 adapter->cmd_privileges = MAX_PRIVILEGES;
2837 else
2838 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002839}
2840
/* Pick the MAC address this interface should use and report whether it is
 * already active (programmed) in the FW.
 *
 * @mac:        out: the chosen MAC address
 * @if_handle:  interface handle used for FW MAC queries
 * @active_mac: out: true when @mac is already programmed, so the caller
 *              must not issue another pmac_add for it
 * @pmac_id:    out: pmac entry id when the MAC came from the FW MAC list
 *
 * Returns 0 on success or a FW command status code.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* A non-zero perm_addr means a MAC was assigned earlier (e.g. on a
	 * previous setup); reuse the current netdev address.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* BE3 VF MACs are programmed by the PF (see
		 * be_vf_eth_addr_config), so they count as active.
		 */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		/* Lancer: consult the FW-provisioned MAC list first */
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2875
/* Populate the adapter->max_* resource limits. On non-BEx chips a per-
 * function FW profile is queried and then clamped to driver limits; when no
 * profile is available, fixed chip defaults are used. The device's total
 * SR-IOV VF capability is also cached from PCI config space.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* When every RX queue would be RSS-capable, drop one —
		 * presumably to reserve a default (non-RSS) RX queue; confirm
		 * against be_rx_cqs_create().
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* FLEX10 partitions share the vlan table across functions */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read the device's total VF capability from the SR-IOV extended
	 * capability in PCI config space.
	 */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
2949
Sathya Perla39f1d942012-05-08 19:41:24 +00002950/* Routine to query per function resource limits */
2951static int be_get_config(struct be_adapter *adapter)
2952{
Sathya Perla4c876612013-02-03 20:30:11 +00002953 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002954
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002955 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2956 &adapter->function_mode,
2957 &adapter->function_caps);
2958 if (status)
2959 goto err;
2960
2961 be_get_resources(adapter);
2962
2963 /* primary mac needs 1 pmac entry */
2964 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2965 sizeof(u32), GFP_KERNEL);
2966 if (!adapter->pmac_id) {
2967 status = -ENOMEM;
2968 goto err;
2969 }
2970
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002971err:
2972 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002973}
2974
/* Bring the adapter up: query limits, create event/completion/MCC queues,
 * create the interface, program the MAC, create TX queues, configure vlans,
 * RX mode and flow control, optionally set up SR-IOV VFs, and start the
 * periodic worker. Called from probe and after a function reset. On any
 * failure everything created so far is torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* Resource limits must be known before any queues are sized */
	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	/* Event queues first; the CQs created below attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enable only the flags this function is actually capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only when the FW does not already have it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Replay vlan filters that were configured before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Push desired flow control only when it differs from the FW state */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3085
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: ring every event queue and schedule its NAPI context so
 * pending completions are processed with interrupts unavailable.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3101
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Cookie marking a flash section directory inside a UFI image; matched by
 * get_fsec_info() below. Split in two 16-byte halves to match the layout of
 * struct flash_section_info::cookie.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3104
/* Decide whether the redboot (boot code) section must be flashed: compare
 * the CRC currently in flash with the last 4 bytes of the new image and
 * flash only when they differ.
 *
 * @p:          start of the firmware file data
 * @img_start:  offset of the redboot image within the file payload
 * @image_size: size of the redboot image in bytes
 * @hdr_size:   size of the file headers preceding the payload
 *
 * Returns true when the image differs and flashing is required; false when
 * it matches or the flash CRC could not be read.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* The image CRC occupies its last 4 bytes */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3131
Sathya Perla306f1342011-08-02 19:57:45 +00003132static bool phy_flashing_required(struct be_adapter *adapter)
3133{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003134 return (adapter->phy.phy_type == TN_8022 &&
3135 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003136}
3137
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003138static bool is_comp_in_ufi(struct be_adapter *adapter,
3139 struct flash_section_info *fsec, int type)
3140{
3141 int i = 0, img_type = 0;
3142 struct flash_section_info_g2 *fsec_g2 = NULL;
3143
Sathya Perlaca34fe32012-11-06 17:48:56 +00003144 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003145 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3146
3147 for (i = 0; i < MAX_FLASH_COMP; i++) {
3148 if (fsec_g2)
3149 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3150 else
3151 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3152
3153 if (img_type == type)
3154 return true;
3155 }
3156 return false;
3157
3158}
3159
/* Scan the UFI image past @header_size for a flash section directory,
 * identified by the "*** SE FLASH DIRECTORY ***" cookie.
 * Returns a pointer into fw->data, or NULL when no cookie is found
 * (corrupted UFI).
 */
struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		/* search in 32-byte steps */
		p += 32;
	}
	return NULL;
}
3176
/* Write one firmware image to the flash ROM.
 *
 * @img:       image payload
 * @flash_cmd: pre-allocated DMA buffer used to carry each chunk to the FW
 * @optype:    flash op-type identifying the section being written
 * @img_size:  payload size in bytes
 *
 * The image is sent in chunks of at most 32KB. All chunks but the last use
 * a SAVE op; the final chunk uses a FLASH op (commit). Returns 0 on success
 * or a FW status code.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits with a FLASH op; earlier chunks SAVE */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			/* ILLEGAL_IOCTL_REQ for PHY FW is treated as
			 * success (break, then return 0) — presumably the
			 * PHY rejects FW it does not need; confirm with FW
			 * spec.
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3217
/* For BE2 and BE3 */
/* Flash all applicable sections of a UFI image using the fixed flash layout
 * tables below (gen3 for BE3, gen2 for BE2). A section is skipped when it is
 * absent from the UFI directory, when the running FW is too old for NCSI,
 * when the PHY needs no FW, or when the redboot CRC already matches.
 * Returns 0 on success, -1 on a malformed UFI, or a FW status code.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Per-section flash offsets/sizes for gen3 (BE3) chips */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Per-section flash offsets/sizes for gen2 (BE2) chips */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip sections not present in this UFI image */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires a running FW of at least 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Boot code is flashed only when its CRC changed */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds-check the image against the firmware file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3327
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003328static int be_flash_skyhawk(struct be_adapter *adapter,
3329 const struct firmware *fw,
3330 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003331{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003332 int status = 0, i, filehdr_size = 0;
3333 int img_offset, img_size, img_optype, redboot;
3334 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3335 const u8 *p = fw->data;
3336 struct flash_section_info *fsec = NULL;
3337
3338 filehdr_size = sizeof(struct flash_file_hdr_g3);
3339 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3340 if (!fsec) {
3341 dev_err(&adapter->pdev->dev,
3342 "Invalid Cookie. UFI corrupted ?\n");
3343 return -1;
3344 }
3345
3346 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3347 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3348 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3349
3350 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3351 case IMAGE_FIRMWARE_iSCSI:
3352 img_optype = OPTYPE_ISCSI_ACTIVE;
3353 break;
3354 case IMAGE_BOOT_CODE:
3355 img_optype = OPTYPE_REDBOOT;
3356 break;
3357 case IMAGE_OPTION_ROM_ISCSI:
3358 img_optype = OPTYPE_BIOS;
3359 break;
3360 case IMAGE_OPTION_ROM_PXE:
3361 img_optype = OPTYPE_PXE_BIOS;
3362 break;
3363 case IMAGE_OPTION_ROM_FCoE:
3364 img_optype = OPTYPE_FCOE_BIOS;
3365 break;
3366 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3367 img_optype = OPTYPE_ISCSI_BACKUP;
3368 break;
3369 case IMAGE_NCSI:
3370 img_optype = OPTYPE_NCSI_FW;
3371 break;
3372 default:
3373 continue;
3374 }
3375
3376 if (img_optype == OPTYPE_REDBOOT) {
3377 redboot = be_flash_redboot(adapter, fw->data,
3378 img_offset, img_size,
3379 filehdr_size + img_hdrs_size);
3380 if (!redboot)
3381 continue;
3382 }
3383
3384 p = fw->data;
3385 p += filehdr_size + img_offset + img_hdrs_size;
3386 if (p + img_size > fw->data + fw->size)
3387 return -1;
3388
3389 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3390 if (status) {
3391 dev_err(&adapter->pdev->dev,
3392 "Flashing section type %d failed.\n",
3393 fsec->fsec_entry[i].type);
3394 return status;
3395 }
3396 }
3397 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003398}
3399
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003400static int lancer_wait_idle(struct be_adapter *adapter)
3401{
3402#define SLIPORT_IDLE_TIMEOUT 30
3403 u32 reg_val;
3404 int status = 0, i;
3405
3406 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3407 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3408 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3409 break;
3410
3411 ssleep(1);
3412 }
3413
3414 if (i == SLIPORT_IDLE_TIMEOUT)
3415 status = -1;
3416
3417 return status;
3418}
3419
3420static int lancer_fw_reset(struct be_adapter *adapter)
3421{
3422 int status = 0;
3423
3424 status = lancer_wait_idle(adapter);
3425 if (status)
3426 return status;
3427
3428 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3429 PHYSDEV_CONTROL_OFFSET);
3430
3431 return status;
3432}
3433
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003434static int lancer_fw_download(struct be_adapter *adapter,
3435 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003436{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003437#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3438#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3439 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003440 const u8 *data_ptr = NULL;
3441 u8 *dest_image_ptr = NULL;
3442 size_t image_size = 0;
3443 u32 chunk_size = 0;
3444 u32 data_written = 0;
3445 u32 offset = 0;
3446 int status = 0;
3447 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003448 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003449
3450 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3451 dev_err(&adapter->pdev->dev,
3452 "FW Image not properly aligned. "
3453 "Length must be 4 byte aligned.\n");
3454 status = -EINVAL;
3455 goto lancer_fw_exit;
3456 }
3457
3458 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3459 + LANCER_FW_DOWNLOAD_CHUNK;
3460 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3461 &flash_cmd.dma, GFP_KERNEL);
3462 if (!flash_cmd.va) {
3463 status = -ENOMEM;
3464 dev_err(&adapter->pdev->dev,
3465 "Memory allocation failure while flashing\n");
3466 goto lancer_fw_exit;
3467 }
3468
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003469 dest_image_ptr = flash_cmd.va +
3470 sizeof(struct lancer_cmd_req_write_object);
3471 image_size = fw->size;
3472 data_ptr = fw->data;
3473
3474 while (image_size) {
3475 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3476
3477 /* Copy the image chunk content. */
3478 memcpy(dest_image_ptr, data_ptr, chunk_size);
3479
3480 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003481 chunk_size, offset,
3482 LANCER_FW_DOWNLOAD_LOCATION,
3483 &data_written, &change_status,
3484 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003485 if (status)
3486 break;
3487
3488 offset += data_written;
3489 data_ptr += data_written;
3490 image_size -= data_written;
3491 }
3492
3493 if (!status) {
3494 /* Commit the FW written */
3495 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003496 0, offset,
3497 LANCER_FW_DOWNLOAD_LOCATION,
3498 &data_written, &change_status,
3499 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003500 }
3501
3502 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3503 flash_cmd.dma);
3504 if (status) {
3505 dev_err(&adapter->pdev->dev,
3506 "Firmware load error. "
3507 "Status code: 0x%x Additional Status: 0x%x\n",
3508 status, add_status);
3509 goto lancer_fw_exit;
3510 }
3511
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003512 if (change_status == LANCER_FW_RESET_NEEDED) {
3513 status = lancer_fw_reset(adapter);
3514 if (status) {
3515 dev_err(&adapter->pdev->dev,
3516 "Adapter busy for FW reset.\n"
3517 "New FW will not be active.\n");
3518 goto lancer_fw_exit;
3519 }
3520 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3521 dev_err(&adapter->pdev->dev,
3522 "System reboot required for new FW"
3523 " to be active\n");
3524 }
3525
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003526 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3527lancer_fw_exit:
3528 return status;
3529}
3530
Sathya Perlaca34fe32012-11-06 17:48:56 +00003531#define UFI_TYPE2 2
3532#define UFI_TYPE3 3
3533#define UFI_TYPE4 4
3534static int be_get_ufi_type(struct be_adapter *adapter,
3535 struct flash_file_hdr_g2 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003536{
3537 if (fhdr == NULL)
3538 goto be_get_ufi_exit;
3539
Sathya Perlaca34fe32012-11-06 17:48:56 +00003540 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3541 return UFI_TYPE4;
3542 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3543 return UFI_TYPE3;
3544 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3545 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003546
3547be_get_ufi_exit:
3548 dev_err(&adapter->pdev->dev,
3549 "UFI and Interface are not compatible for flashing\n");
3550 return -1;
3551}
3552
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003553static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3554{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003555 struct flash_file_hdr_g2 *fhdr;
3556 struct flash_file_hdr_g3 *fhdr3;
3557 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003558 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003559 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003560 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003561
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003562 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003563 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3564 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003565 if (!flash_cmd.va) {
3566 status = -ENOMEM;
3567 dev_err(&adapter->pdev->dev,
3568 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003569 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003570 }
3571
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003572 p = fw->data;
3573 fhdr = (struct flash_file_hdr_g2 *)p;
3574
Sathya Perlaca34fe32012-11-06 17:48:56 +00003575 ufi_type = be_get_ufi_type(adapter, fhdr);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003576
3577 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3578 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3579 for (i = 0; i < num_imgs; i++) {
3580 img_hdr_ptr = (struct image_hdr *)(fw->data +
3581 (sizeof(struct flash_file_hdr_g3) +
3582 i * sizeof(struct image_hdr)));
3583 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Sathya Perlaca34fe32012-11-06 17:48:56 +00003584 if (ufi_type == UFI_TYPE4)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003585 status = be_flash_skyhawk(adapter, fw,
3586 &flash_cmd, num_imgs);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003587 else if (ufi_type == UFI_TYPE3)
3588 status = be_flash_BEx(adapter, fw, &flash_cmd,
3589 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003590 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003591 }
3592
Sathya Perlaca34fe32012-11-06 17:48:56 +00003593 if (ufi_type == UFI_TYPE2)
3594 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003595 else if (ufi_type == -1)
3596 status = -1;
3597
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003598 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3599 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003600 if (status) {
3601 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003602 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003603 }
3604
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003605 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003606
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003607be_fw_exit:
3608 return status;
3609}
3610
3611int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3612{
3613 const struct firmware *fw;
3614 int status;
3615
3616 if (!netif_running(adapter->netdev)) {
3617 dev_err(&adapter->pdev->dev,
3618 "Firmware load not allowed (interface is down)\n");
3619 return -1;
3620 }
3621
3622 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3623 if (status)
3624 goto fw_exit;
3625
3626 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3627
3628 if (lancer_chip(adapter))
3629 status = lancer_fw_download(adapter, fw);
3630 else
3631 status = be_fw_download(adapter, fw);
3632
Ajit Khaparde84517482009-09-04 03:12:16 +00003633fw_exit:
3634 release_firmware(fw);
3635 return status;
3636}
3637
stephen hemmingere5686ad2012-01-05 19:10:25 +00003638static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003639 .ndo_open = be_open,
3640 .ndo_stop = be_close,
3641 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003642 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003643 .ndo_set_mac_address = be_mac_addr_set,
3644 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003645 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003646 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003647 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3648 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003649 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003650 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003651 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003652 .ndo_get_vf_config = be_get_vf_config,
3653#ifdef CONFIG_NET_POLL_CONTROLLER
3654 .ndo_poll_controller = be_netpoll,
3655#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003656};
3657
3658static void be_netdev_init(struct net_device *netdev)
3659{
3660 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003661 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003662 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003663
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003664 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003665 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3666 NETIF_F_HW_VLAN_TX;
3667 if (be_multi_rxq(adapter))
3668 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003669
3670 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003671 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003672
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003673 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003674 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003675
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003676 netdev->priv_flags |= IFF_UNICAST_FLT;
3677
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003678 netdev->flags |= IFF_MULTICAST;
3679
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00003680 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003681
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003682 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003683
3684 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3685
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003686 for_all_evt_queues(adapter, eqo, i)
3687 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003688}
3689
3690static void be_unmap_pci_bars(struct be_adapter *adapter)
3691{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003692 if (adapter->csr)
3693 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003694 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003695 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003696}
3697
/* BAR number that holds the doorbell registers: BAR 0 on Lancer and on
 * virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3705
3706static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003707{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003708 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003709 adapter->roce_db.size = 4096;
3710 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3711 db_bar(adapter));
3712 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3713 db_bar(adapter));
3714 }
Parav Pandit045508a2012-03-26 14:27:13 +00003715 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003716}
3717
3718static int be_map_pci_bars(struct be_adapter *adapter)
3719{
3720 u8 __iomem *addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003721 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003722
Sathya Perlace66f782012-11-06 17:48:58 +00003723 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3724 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3725 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003726
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003727 if (BEx_chip(adapter) && be_physfn(adapter)) {
3728 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3729 if (adapter->csr == NULL)
3730 return -ENOMEM;
3731 }
3732
Sathya Perlace66f782012-11-06 17:48:58 +00003733 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003734 if (addr == NULL)
3735 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003736 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00003737
3738 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003739 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00003740
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003741pci_map_err:
3742 be_unmap_pci_bars(adapter);
3743 return -ENOMEM;
3744}
3745
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003746static void be_ctrl_cleanup(struct be_adapter *adapter)
3747{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003748 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003749
3750 be_unmap_pci_bars(adapter);
3751
3752 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003753 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3754 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003755
Sathya Perla5b8821b2011-08-02 19:57:44 +00003756 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003757 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003758 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3759 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003760}
3761
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003762static int be_ctrl_init(struct be_adapter *adapter)
3763{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003764 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3765 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003766 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003767 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003768 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003769
Sathya Perlace66f782012-11-06 17:48:58 +00003770 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3771 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3772 SLI_INTF_FAMILY_SHIFT;
3773 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3774
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003775 status = be_map_pci_bars(adapter);
3776 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003777 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003778
3779 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003780 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3781 mbox_mem_alloc->size,
3782 &mbox_mem_alloc->dma,
3783 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003784 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003785 status = -ENOMEM;
3786 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003787 }
3788 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3789 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3790 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3791 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003792
Sathya Perla5b8821b2011-08-02 19:57:44 +00003793 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3794 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3795 &rx_filter->dma, GFP_KERNEL);
3796 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003797 status = -ENOMEM;
3798 goto free_mbox;
3799 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003800 memset(rx_filter->va, 0, rx_filter->size);
Ivan Vecera29849612010-12-14 05:43:19 +00003801 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003802 spin_lock_init(&adapter->mcc_lock);
3803 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003804
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003805 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003806 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003807 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003808
3809free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003810 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3811 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003812
3813unmap_pci_bars:
3814 be_unmap_pci_bars(adapter);
3815
3816done:
3817 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003818}
3819
3820static void be_stats_cleanup(struct be_adapter *adapter)
3821{
Sathya Perla3abcded2010-10-03 22:12:27 -07003822 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003823
3824 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003825 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3826 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003827}
3828
3829static int be_stats_init(struct be_adapter *adapter)
3830{
Sathya Perla3abcded2010-10-03 22:12:27 -07003831 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003832
Sathya Perlaca34fe32012-11-06 17:48:56 +00003833 if (lancer_chip(adapter))
3834 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3835 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003836 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003837 else
3838 /* BE3 and Skyhawk */
3839 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3840
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003841 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3842 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003843 if (cmd->va == NULL)
3844 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003845 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003846 return 0;
3847}
3848
Bill Pemberton3bc6b062012-12-03 09:23:09 -05003849static void be_remove(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003850{
3851 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003852
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003853 if (!adapter)
3854 return;
3855
Parav Pandit045508a2012-03-26 14:27:13 +00003856 be_roce_dev_remove(adapter);
3857
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003858 cancel_delayed_work_sync(&adapter->func_recovery_work);
3859
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003860 unregister_netdev(adapter->netdev);
3861
Sathya Perla5fb379e2009-06-18 00:02:59 +00003862 be_clear(adapter);
3863
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00003864 /* tell fw we're done with firing cmds */
3865 be_cmd_fw_clean(adapter);
3866
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003867 be_stats_cleanup(adapter);
3868
3869 be_ctrl_cleanup(adapter);
3870
Sathya Perlad6b6d982012-09-05 01:56:48 +00003871 pci_disable_pcie_error_reporting(pdev);
3872
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003873 pci_set_drvdata(pdev, NULL);
3874 pci_release_regions(pdev);
3875 pci_disable_device(pdev);
3876
3877 free_netdev(adapter->netdev);
3878}
3879
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003880bool be_is_wol_supported(struct be_adapter *adapter)
3881{
3882 return ((adapter->wol_cap & BE_WOL_CAP) &&
3883 !be_is_wol_excluded(adapter)) ? true : false;
3884}
3885
Somnath Kotur941a77d2012-05-17 22:59:03 +00003886u32 be_get_fw_log_level(struct be_adapter *adapter)
3887{
3888 struct be_dma_mem extfat_cmd;
3889 struct be_fat_conf_params *cfgs;
3890 int status;
3891 u32 level = 0;
3892 int j;
3893
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003894 if (lancer_chip(adapter))
3895 return 0;
3896
Somnath Kotur941a77d2012-05-17 22:59:03 +00003897 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3898 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3899 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3900 &extfat_cmd.dma);
3901
3902 if (!extfat_cmd.va) {
3903 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3904 __func__);
3905 goto err;
3906 }
3907
3908 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3909 if (!status) {
3910 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3911 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00003912 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00003913 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3914 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3915 }
3916 }
3917 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3918 extfat_cmd.dma);
3919err:
3920 return level;
3921}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003922
Sathya Perla39f1d942012-05-08 19:41:24 +00003923static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003924{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003925 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00003926 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003927
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003928 status = be_cmd_get_cntl_attributes(adapter);
3929 if (status)
3930 return status;
3931
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003932 status = be_cmd_get_acpi_wol_cap(adapter);
3933 if (status) {
3934 /* in case of a failure to get wol capabillities
3935 * check the exclusion list to determine WOL capability */
3936 if (!be_is_wol_excluded(adapter))
3937 adapter->wol_cap |= BE_WOL_CAP;
3938 }
3939
3940 if (be_is_wol_supported(adapter))
3941 adapter->wol = true;
3942
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00003943 /* Must be a power of 2 or else MODULO will BUG_ON */
3944 adapter->be_get_temp_freq = 64;
3945
Somnath Kotur941a77d2012-05-17 22:59:03 +00003946 level = be_get_fw_log_level(adapter);
3947 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3948
Sathya Perla2243e2e2009-11-22 22:02:03 +00003949 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003950}
3951
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003952static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003953{
3954 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003955
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003956 status = lancer_test_and_set_rdy_state(adapter);
3957 if (status)
3958 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003959
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003960 if (netif_running(adapter->netdev))
3961 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003962
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003963 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003964
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003965 adapter->hw_error = false;
3966 adapter->fw_timeout = false;
3967
3968 status = be_setup(adapter);
3969 if (status)
3970 goto err;
3971
3972 if (netif_running(adapter->netdev)) {
3973 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003974 if (status)
3975 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003976 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003977
3978 dev_err(&adapter->pdev->dev,
3979 "Adapter SLIPORT recovery succeeded\n");
3980 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003981err:
Padmanabh Ratnakar67297ad2012-10-20 06:02:27 +00003982 if (adapter->eeh_error)
3983 dev_err(&adapter->pdev->dev,
3984 "Adapter SLIPORT recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003985
3986 return status;
3987}
3988
3989static void be_func_recovery_task(struct work_struct *work)
3990{
3991 struct be_adapter *adapter =
3992 container_of(work, struct be_adapter, func_recovery_work.work);
3993 int status;
3994
3995 be_detect_error(adapter);
3996
3997 if (adapter->hw_error && lancer_chip(adapter)) {
3998
3999 if (adapter->eeh_error)
4000 goto out;
4001
4002 rtnl_lock();
4003 netif_device_detach(adapter->netdev);
4004 rtnl_unlock();
4005
4006 status = lancer_recover_func(adapter);
4007
4008 if (!status)
4009 netif_device_attach(adapter->netdev);
4010 }
4011
4012out:
4013 schedule_delayed_work(&adapter->func_recovery_work,
4014 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004015}
4016
4017static void be_worker(struct work_struct *work)
4018{
4019 struct be_adapter *adapter =
4020 container_of(work, struct be_adapter, work.work);
4021 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004022 struct be_eq_obj *eqo;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004023 int i;
4024
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004025 /* when interrupts are not yet enabled, just reap any pending
4026 * mcc completions */
4027 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004028 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004029 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004030 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004031 goto reschedule;
4032 }
4033
4034 if (!adapter->stats_cmd_sent) {
4035 if (lancer_chip(adapter))
4036 lancer_cmd_get_pport_stats(adapter,
4037 &adapter->stats_cmd);
4038 else
4039 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4040 }
4041
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004042 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4043 be_cmd_get_die_temperature(adapter);
4044
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004045 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004046 if (rxo->rx_post_starved) {
4047 rxo->rx_post_starved = false;
4048 be_post_rx_frags(rxo, GFP_KERNEL);
4049 }
4050 }
4051
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004052 for_all_evt_queues(adapter, eqo, i)
4053 be_eqd_update(adapter, eqo);
4054
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004055reschedule:
4056 adapter->work_counter++;
4057 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4058}
4059
Sathya Perla39f1d942012-05-08 19:41:24 +00004060static bool be_reset_required(struct be_adapter *adapter)
4061{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004062 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004063}
4064
Sathya Perlad3791422012-09-28 04:39:44 +00004065static char *mc_name(struct be_adapter *adapter)
4066{
4067 if (adapter->function_mode & FLEX10_MODE)
4068 return "FLEX10";
4069 else if (adapter->function_mode & VNIC_MODE)
4070 return "vNIC";
4071 else if (adapter->function_mode & UMC_ENABLED)
4072 return "UMC";
4073 else
4074 return "";
4075}
4076
/* "PF" or "VF" depending on whether this is a physical function. */
static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
4081
/* PCI probe: bring up one BE/Lancer function.
 *
 * Ordering is significant: PCI enable -> BAR regions -> netdev alloc ->
 * DMA mask -> mailbox/ctrl init -> FW ready sync -> optional function
 * reset -> stats DMA -> be_setup() (queues, interrupts) -> register_netdev.
 * Errors unwind in strict reverse order via the goto chain below.
 *
 * Returns 0 on success or a negative errno; on failure drvdata is cleared
 * so be_shutdown()/be_remove() can detect a half-probed device.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* Allocate the max queues up front; be_setup() decides how many
	 * are actually used.
	 */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: log the failure but keep probing */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Default to flow control enabled in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Kick off periodic FW error detection/recovery */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	/* clear drvdata so shutdown/remove see the probe never completed */
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4202
/* Legacy PM suspend callback: quiesce the function and power it down.
 *
 * Arms wake-on-LAN first (FW must be told before the function is torn
 * down), stops the recovery worker, detaches and closes the netdev under
 * rtnl, releases queues/interrupts via be_clear(), then saves PCI config
 * space and drops to the state chosen by the PCI core.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4226
4227static int be_resume(struct pci_dev *pdev)
4228{
4229 int status = 0;
4230 struct be_adapter *adapter = pci_get_drvdata(pdev);
4231 struct net_device *netdev = adapter->netdev;
4232
4233 netif_device_detach(netdev);
4234
4235 status = pci_enable_device(pdev);
4236 if (status)
4237 return status;
4238
4239 pci_set_power_state(pdev, 0);
4240 pci_restore_state(pdev);
4241
Sathya Perla2243e2e2009-11-22 22:02:03 +00004242 /* tell fw we're ready to fire cmds */
4243 status = be_cmd_fw_init(adapter);
4244 if (status)
4245 return status;
4246
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004247 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004248 if (netif_running(netdev)) {
4249 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004250 be_open(netdev);
4251 rtnl_unlock();
4252 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004253
4254 schedule_delayed_work(&adapter->func_recovery_work,
4255 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004256 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004257
4258 if (adapter->wol)
4259 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004260
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004261 return 0;
4262}
4263
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is NULL if probe failed (cleared on its error path) */
	if (!adapter)
		return;

	/* stop both workers before touching the HW */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* function-level reset stops all DMA before kexec/power-off */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4283
/* EEH callback: a PCI channel error was detected on this function.
 *
 * Marks the adapter in error, stops the recovery worker, detaches and
 * closes the netdev, and tears down queues with be_clear(). On permanent
 * failure the device is abandoned (PCI_ERS_RESULT_DISCONNECT); otherwise
 * it is disabled and a slot reset is requested.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4323
/* EEH callback: the slot has been reset; re-enable and re-validate HW.
 *
 * Clears the driver's sticky error flags, re-enables the device, restores
 * config space, and waits for the FW to report ready before declaring the
 * function recovered. Any failure disconnects the device.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear the AER uncorrectable status so future errors are reported */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4350
/* EEH callback: traffic may resume; rebuild the function from scratch.
 *
 * Saves the restored config space, re-inits FW command state, resets the
 * function, rebuilds queues via be_setup(), reopens the netdev if it was
 * running, and restarts the recovery worker. On any failure the device is
 * left detached and the error is logged (void callback — no status to
 * return to the EEH core).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4387
/* PCI error (EEH/AER) recovery entry points for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4393
/* PCI driver registration: probe/remove, legacy PM, shutdown and EEH */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4404
4405static int __init be_init_module(void)
4406{
Joe Perches8e95a202009-12-03 07:58:21 +00004407 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4408 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004409 printk(KERN_WARNING DRV_NAME
4410 " : Module param rx_frag_size must be 2048/4096/8192."
4411 " Using 2048\n");
4412 rx_frag_size = 2048;
4413 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004414
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004415 return pci_register_driver(&be_driver);
4416}
4417module_init(be_init_module);
4418
/* Module exit: unregister the PCI driver (per-device teardown happens
 * in be_remove via the PCI core).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);