blob: 528a886bc2cdb8f7bc51cbc1f7c62bd2ccaa3a87 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
/* Number of PCI virtual functions to enable (0 = none); read-only sysfs */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the hardware */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI IDs claimed by this driver: BladeEngine (ServerEngines) and
 * OneConnect (Emulex) device families. Terminated by the zero entry.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* NOTE(review): entries appear to map one-to-one to bit positions in the
 * UE status low register — confirm against the caller that decodes UEs.
 * Trailing spaces in some strings are intentional and preserved as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* NOTE(review): companion of ue_status_low_desc for the high 32 UE bits;
 * "Unknown" entries pad the table to a full 32 bit positions.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
/* Initialize @q with @len entries of @entry_size bytes each and back it
 * with zeroed DMA-coherent memory.
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	/* hardware expects the ring zeroed before use */
	memset(mem->va, 0, mem->size);
	return 0;
}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232static int be_mac_addr_set(struct net_device *netdev, void *p)
233{
234 struct be_adapter *adapter = netdev_priv(netdev);
235 struct sockaddr *addr = p;
236 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000237 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000238 u32 pmac_id = adapter->pmac_id[0];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000240 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL;
242
Somnath Koture3a7ae22011-10-27 07:14:05 +0000243 status = be_cmd_mac_addr_query(adapter, current_mac,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000244 MAC_ADDRESS_TYPE_NETWORK, false,
245 adapter->if_handle, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000246 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000247 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248
Somnath Koture3a7ae22011-10-27 07:14:05 +0000249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000251 adapter->if_handle, &adapter->pmac_id[0], 0);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 if (status)
253 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700254
Somnath Koture3a7ae22011-10-27 07:14:05 +0000255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0;
259err:
260 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700261 return status;
262}
263
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000264static void populate_be2_stats(struct be_adapter *adapter)
265{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000269 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272
Sathya Perlaac124ff2011-07-25 19:10:14 +0000273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000301 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000303 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311}
312
/* Copy v1-layout (BE3) hardware stats from the fw command response into
 * the driver's generic drv_stats; v1 additionally carries pmem fifo and
 * priority-pause counters, and per-port jabber events.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer per-port (pport) stats from the fw command response into
 * the driver's generic drv_stats. Lancer exposes several counters as
 * 64-bit lo/hi pairs; only the low words are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): the same rx_fifo_overflow counter feeds both the
	 * input-fifo and rxpp-fifo driver stats — confirm that is intended */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
		pport_stats->rx_address_mismatch_drops +
		pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
424 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000425 for_all_rx_queues(adapter, rxo, i) {
426 /* below erx HW counter can actually wrap around after
427 * 65535. Driver accumulates a 32-bit value
428 */
429 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
430 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
431 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432}
433
Sathya Perlaab1594e2011-07-25 19:10:15 +0000434static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
435 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700436{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000437 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000438 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700439 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000440 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000441 u64 pkts, bytes;
442 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700443 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700444
Sathya Perla3abcded2010-10-03 22:12:27 -0700445 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000446 const struct be_rx_stats *rx_stats = rx_stats(rxo);
447 do {
448 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
449 pkts = rx_stats(rxo)->rx_pkts;
450 bytes = rx_stats(rxo)->rx_bytes;
451 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
452 stats->rx_packets += pkts;
453 stats->rx_bytes += bytes;
454 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
455 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
456 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700457 }
458
Sathya Perla3c8def92011-06-12 20:01:58 +0000459 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000460 const struct be_tx_stats *tx_stats = tx_stats(txo);
461 do {
462 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
463 pkts = tx_stats(txo)->tx_pkts;
464 bytes = tx_stats(txo)->tx_bytes;
465 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
466 stats->tx_packets += pkts;
467 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000468 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700469
470 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000471 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000472 drvs->rx_alignment_symbol_errors +
473 drvs->rx_in_range_errors +
474 drvs->rx_out_range_errors +
475 drvs->rx_frame_too_long +
476 drvs->rx_dropped_too_small +
477 drvs->rx_dropped_too_short +
478 drvs->rx_dropped_header_too_small +
479 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000480 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700481
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700482 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000483 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000484 drvs->rx_out_range_errors +
485 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000486
Sathya Perlaab1594e2011-07-25 19:10:15 +0000487 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700488
489 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000490 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000491
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700492 /* receiver fifo overrun */
493 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000494 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000495 drvs->rx_input_fifo_overflow_drop +
496 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000497 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700498}
499
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000500void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700502 struct net_device *netdev = adapter->netdev;
503
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000504 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000505 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000508
509 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510 netif_carrier_on(netdev);
511 else
512 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513}
514
Sathya Perla3c8def92011-06-12 20:01:58 +0000515static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000516 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700517{
Sathya Perla3c8def92011-06-12 20:01:58 +0000518 struct be_tx_stats *stats = tx_stats(txo);
519
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 stats->tx_reqs++;
522 stats->tx_wrbs += wrb_cnt;
523 stats->tx_bytes += copied;
524 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700525 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000527 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700528}
529
530/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000531static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700534 int cnt = (skb->len > skb->data_len);
535
536 cnt += skb_shinfo(skb)->nr_frags;
537
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 /* to account for hdr wrb */
539 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000540 if (lancer_chip(adapter) || !(cnt & 1)) {
541 *dummy = false;
542 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* add a dummy to make it an even num */
544 cnt++;
545 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000546 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700547 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548 return cnt;
549}
550
551static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
552{
553 wrb->frag_pa_hi = upper_32_bits(addr);
554 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
555 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
556}
557
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000558static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559 struct sk_buff *skb)
560{
561 u8 vlan_prio;
562 u16 vlan_tag;
563
564 vlan_tag = vlan_tx_tag_get(skb);
565 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566 /* If vlan priority provided by OS is NOT in available bmap */
567 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569 adapter->recommended_prio;
570
571 return vlan_tag;
572}
573
/* Program the header WRB for a TX request: checksum/LSO offload flags,
 * vlan insertion, total wrb count and payload length.
 * Note the Lancer A0 workaround: for GSO it also sets the ip/tcp/udp
 * checksum bits explicitly, and lso6 is never set on Lancer.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
617
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000618static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000619 bool unmap_single)
620{
621 dma_addr_t dma;
622
623 be_dws_le_to_cpu(wrb, sizeof(*wrb));
624
625 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000626 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000627 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000628 dma_unmap_single(dev, dma, wrb->frag_len,
629 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000631 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000632 }
633}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700634
/* DMA-map @skb and fill TX WRBs for it on @txq: the hdr WRB first, then
 * one WRB for the linear head (if any), one per page frag, and an
 * optional dummy WRB. Returns the number of payload bytes queued, or 0
 * on a DMA mapping failure, in which case all mappings and the queue
 * head are rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the hdr WRB now; it is filled last when the total
	 * copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	/* Map the linear (head) portion of the skb, if non-empty */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length WRB when the caller asked for one */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: restore the queue head and unmap everything mapped so far */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB is a single map */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
700
/* ndo_start_xmit handler: builds WRBs for @skb on the TX queue selected
 * by the skb's queue mapping and rings the doorbell. Always returns
 * NETDEV_TX_OK; on mapping failure the skb is dropped, not requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software and clear vlan_tci.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* Mapping failed: restore the queue head and drop the pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
760
761static int be_change_mtu(struct net_device *netdev, int new_mtu)
762{
763 struct be_adapter *adapter = netdev_priv(netdev);
764 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000765 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
766 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700767 dev_info(&adapter->pdev->dev,
768 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000769 BE_MIN_MTU,
770 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 return -EINVAL;
772 }
773 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
774 netdev->mtu, new_mtu);
775 netdev->mtu = new_mtu;
776 return 0;
777}
778
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	/* Program the (single) transparent vlan tag of the given VF */
	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* More vids than HW slots: turn on vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
817
Jiri Pirko8e586132011-12-08 19:52:37 -0500818static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819{
820 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000821 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700822
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000823 if (!be_physfn(adapter)) {
824 status = -EINVAL;
825 goto ret;
826 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000827
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700828 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000829 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000830 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500831
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000832 if (!status)
833 adapter->vlans_added++;
834 else
835 adapter->vlan_tag[vid] = 0;
836ret:
837 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838}
839
Jiri Pirko8e586132011-12-08 19:52:37 -0500840static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841{
842 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000843 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000845 if (!be_physfn(adapter)) {
846 status = -EINVAL;
847 goto ret;
848 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000849
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000851 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000852 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500853
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000854 if (!status)
855 adapter->vlans_added--;
856 else
857 adapter->vlan_tag[vid] = 1;
858ret:
859 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860}
861
/* ndo_set_rx_mode handler: programs the HW RX filters (promiscuous,
 * multicast and the secondary unicast MAC list) to match the netdev's
 * current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the vlan table skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-program the secondary unicast MAC list if it changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* drop all previously programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more uc addrs than pmac slots: fall back to promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
915
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000916static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
917{
918 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000919 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000920 int status;
921
Sathya Perla11ac75e2011-12-13 00:58:50 +0000922 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000923 return -EPERM;
924
Sathya Perla11ac75e2011-12-13 00:58:50 +0000925 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000926 return -EINVAL;
927
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000928 if (lancer_chip(adapter)) {
929 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
930 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000931 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
932 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000933
Sathya Perla11ac75e2011-12-13 00:58:50 +0000934 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
935 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000936 }
937
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000938 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000939 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
940 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000941 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000942 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000943
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000944 return status;
945}
946
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000947static int be_get_vf_config(struct net_device *netdev, int vf,
948 struct ifla_vf_info *vi)
949{
950 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000951 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000952
Sathya Perla11ac75e2011-12-13 00:58:50 +0000953 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000954 return -EPERM;
955
Sathya Perla11ac75e2011-12-13 00:58:50 +0000956 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000957 return -EINVAL;
958
959 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000960 vi->tx_rate = vf_cfg->tx_rate;
961 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000962 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000964
965 return 0;
966}
967
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000968static int be_set_vf_vlan(struct net_device *netdev,
969 int vf, u16 vlan, u8 qos)
970{
971 struct be_adapter *adapter = netdev_priv(netdev);
972 int status = 0;
973
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000975 return -EPERM;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000978 return -EINVAL;
979
980 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000981 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
982 /* If this is new value, program it. Else skip. */
983 adapter->vf_cfg[vf].vlan_tag = vlan;
984
985 status = be_cmd_set_hsw_config(adapter, vlan,
986 vf + 1, adapter->vf_cfg[vf].if_handle);
987 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000988 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000989 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +0000990 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000991 vlan = adapter->vf_cfg[vf].def_vid;
992 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
993 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000994 }
995
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000996
997 if (status)
998 dev_info(&adapter->pdev->dev,
999 "VLAN %d config on VF %d failed\n", vlan, vf);
1000 return status;
1001}
1002
Ajit Khapardee1d18732010-07-23 01:52:13 +00001003static int be_set_vf_tx_rate(struct net_device *netdev,
1004 int vf, int rate)
1005{
1006 struct be_adapter *adapter = netdev_priv(netdev);
1007 int status = 0;
1008
Sathya Perla11ac75e2011-12-13 00:58:50 +00001009 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001010 return -EPERM;
1011
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001012 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001013 return -EINVAL;
1014
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001015 if (rate < 100 || rate > 10000) {
1016 dev_err(&adapter->pdev->dev,
1017 "tx rate must be between 100 and 10000 Mbps\n");
1018 return -EINVAL;
1019 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001020
Ajit Khaparde856c4012011-02-11 13:32:32 +00001021 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001022
1023 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001024 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001025 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001026 else
1027 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001028 return status;
1029}
1030
/* Adaptive interrupt coalescing: recompute the EQ delay (eqd) for @eqo
 * from the observed RX packet rate, clamp it to [min_eqd, max_eqd], and
 * program it via FW cmd only when the value changes.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just program the statically configured eqd */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no rx stats to sample */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit pkt counter consistently (seqcount retry) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale eqd with the pkt rate, then clamp; rates that yield a
	 * tiny eqd (< 10) get no delay at all */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW cmd only on an actual change */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1079
Sathya Perla3abcded2010-10-03 22:12:27 -07001080static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001081 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001082{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001083 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001084
Sathya Perlaab1594e2011-07-25 19:10:15 +00001085 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001086 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001087 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001088 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001089 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001090 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001091 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001092 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001093 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094}
1095
Sathya Perla2e588f82011-03-11 02:49:26 +00001096static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001097{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001098 /* L4 checksum is not reliable for non TCP/UDP packets.
1099 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001100 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1101 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001102}
1103
/* Return the page_info for the RX frag at @frag_idx and decrement the
 * rxq used count. If this frag is the last user of its (shared)
 * DMA-mapped page, the page is unmapped here; the caller takes over
 * the page reference held in page_info->page.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Unmap only once per big page, on its last user */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1124
1125/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001126static void be_rx_compl_discard(struct be_rx_obj *rxo,
1127 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001128{
Sathya Perla3abcded2010-10-03 22:12:27 -07001129 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001130 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001131 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001132
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001133 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001134 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001135 put_page(page_info->page);
1136 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001137 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138 }
1139}
1140
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN bytes are copied into the
 * skb's linear area; the remaining data stays in the RX pages, which
 * are attached as skb frags (frags from the same physical page are
 * coalesced into one slot).
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header went to the linear area; the rest of this frag
		 * stays in the page and becomes skb frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by the skb (or freed) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1217
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX frags, set checksum/hash/vlan
 * metadata and hand it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: count the drop and release the RX frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when csum_passed() deems it reliable */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1250
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb (no linear data) from the posted rx buffers and
 * hands it to the stack via napi_gro_frags().  Consumes rxcp->num_rcvd
 * page_info slots from the rx queue; falls back to discarding the compl
 * if no napi skb is available.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frags and recycle the buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j tracks the current skb frag slot; -1 means none started yet */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: the frag slot already
			 * holds a reference, so drop this one */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* hw has already validated csum for GRO-eligible (tcpf && !err) pkts */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1305
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001306static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1307 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001308{
Sathya Perla2e588f82011-03-11 02:49:26 +00001309 rxcp->pkt_size =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1311 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1312 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1313 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001314 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 rxcp->ip_csum =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1317 rxcp->l4_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1319 rxcp->ipv6 =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1321 rxcp->rxq_idx =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1323 rxcp->num_rcvd =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1325 rxcp->pkt_type =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001327 rxcp->rss_hash =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001329 if (rxcp->vlanf) {
1330 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001331 compl);
1332 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1333 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001334 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001335 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001336}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001337
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001338static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1339 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001340{
1341 rxcp->pkt_size =
1342 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1343 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1344 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1345 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001346 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001347 rxcp->ip_csum =
1348 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1349 rxcp->l4_csum =
1350 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1351 rxcp->ipv6 =
1352 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1353 rxcp->rxq_idx =
1354 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1355 rxcp->num_rcvd =
1356 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1357 rxcp->pkt_type =
1358 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001359 rxcp->rss_hash =
1360 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001361 if (rxcp->vlanf) {
1362 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001363 compl);
1364 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1365 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001366 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001367 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001368}
1369
/* Fetch, parse and consume the next valid RX completion from rxo's CQ.
 * Returns NULL if no completion is pending.  The returned pointer is
 * rxo->rxcp — a single per-ring scratch struct — so it is only valid
 * until the next call on the same ring.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read barrier: don't read the descriptor body until the valid bit
	 * has been observed set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* clear the vlan flag for pvid-tagged pkts whose vid is not
		 * in the sw vlan table */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1409
Eric Dumazet1829b082011-03-01 05:48:12 +00001410static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001411{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001413
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001414 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001415 gfp |= __GFP_COMP;
1416 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417}
1418
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* post at most MAX_RX_POST frags; stop early at the first slot that
	 * still holds a page (i.e. is still owned by hw) */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			/* NOTE(review): page_dmaaddr is not checked with
			 * dma_mapping_error() — confirm a mapping failure
			 * cannot occur / is acceptable here */
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						0, adapter->big_page_size,
						DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* carve the next rx_frag_size chunk out of the same
			 * big page; take an extra page reference for it */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* fill the hw rx descriptor with the frag's dma address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1480
/* Fetch, byte-swap and consume the next valid TX completion from tx_cq.
 * Returns NULL when no completion is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* read barrier: body of the entry is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* clear the valid bit so the entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1496
/* Complete the TX request whose wrbs end at last_index: unmap every data
 * wrb and free the skb.  Returns the number of wrbs consumed, including
 * the header wrb.  The caller is responsible for decrementing txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the skb's linear header is unmapped together with the
		 * first data wrb only */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1528
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001529/* Return the number of events in the event queue */
1530static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001531{
1532 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001533 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001534
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001535 do {
1536 eqe = queue_tail_node(&eqo->q);
1537 if (eqe->evt == 0)
1538 break;
1539
1540 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001541 eqe->evt = 0;
1542 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001543 queue_tail_inc(&eqo->q);
1544 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001545
1546 return num;
1547}
1548
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001549static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001550{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001551 bool rearm = false;
1552 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001553
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001554 /* Deal with any spurious interrupts that come without events */
1555 if (!num)
1556 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001557
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001558 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001559 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001560 napi_schedule(&eqo->napi);
1561
1562 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001563}
1564
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001565/* Leaves the EQ is disarmed state */
1566static void be_eq_clean(struct be_eq_obj *eqo)
1567{
1568 int num = events_get(eqo);
1569
1570 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1571}
1572
/* Discard all pending RX completions, then free every posted-but-unused
 * rx buffer and reset the ring indices.  Presumably called only while RX
 * is quiesced — verify at call sites.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = oldest still-posted descriptor index */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1597
/* Drain TX completions on all TX queues, waiting up to ~200ms for them to
 * arrive; afterwards forcibly free any posted skbs whose completions never
 * came, so the rings end up empty.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* consume every completion currently in the CQ */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
						end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack processed compls and release wrbs;
				 * counters are reset per-queue */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		/* done when every txq is empty, or after 200 x 1ms tries */
		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute the wrb span of this skb since no compl
			 * carries its wrb_index */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						&dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1656
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001657static void be_evt_queues_destroy(struct be_adapter *adapter)
1658{
1659 struct be_eq_obj *eqo;
1660 int i;
1661
1662 for_all_evt_queues(adapter, eqo, i) {
1663 be_eq_clean(eqo);
1664 if (eqo->q.created)
1665 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1666 be_queue_free(adapter, &eqo->q);
1667 }
1668}
1669
/* Allocate and create one event queue per irq vector.  Returns 0 or a
 * negative error; on failure, partially created EQs are presumably torn
 * down by the caller via be_evt_queues_destroy() — verify at call sites.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1697
Sathya Perla5fb379e2009-06-18 00:02:59 +00001698static void be_mcc_queues_destroy(struct be_adapter *adapter)
1699{
1700 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001701
Sathya Perla8788fdc2009-07-27 22:52:03 +00001702 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001703 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001704 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001705 be_queue_free(adapter, q);
1706
Sathya Perla8788fdc2009-07-27 22:52:03 +00001707 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001708 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001709 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001710 be_queue_free(adapter, q);
1711}
1712
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC queue on top of it,
 * unwinding via the goto chain on any failure.  Returns 0 or -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1745
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001746static void be_tx_queues_destroy(struct be_adapter *adapter)
1747{
1748 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001749 struct be_tx_obj *txo;
1750 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751
Sathya Perla3c8def92011-06-12 20:01:58 +00001752 for_all_tx_queues(adapter, txo, i) {
1753 q = &txo->q;
1754 if (q->created)
1755 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1756 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757
Sathya Perla3c8def92011-06-12 20:01:58 +00001758 q = &txo->cq;
1759 if (q->created)
1760 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1761 be_queue_free(adapter, q);
1762 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763}
1764
Sathya Perladafc0fe2011-10-24 02:45:02 +00001765static int be_num_txqs_want(struct be_adapter *adapter)
1766{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001767 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001768 lancer_chip(adapter) || !be_physfn(adapter) ||
1769 adapter->generation == BE_GEN2)
1770 return 1;
1771 else
1772 return MAX_TX_QS;
1773}
1774
/* Decide how many TX queues to use, publish that to the netdev, and
 * allocate/create one completion queue per TX queue.  Returns 0 or a
 * negative error (partially created CQs are left for the caller's
 * cleanup path).
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* hold rtnl while changing the netdev's queue count */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1807
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001808static int be_tx_qs_create(struct be_adapter *adapter)
1809{
1810 struct be_tx_obj *txo;
1811 int i, status;
1812
1813 for_all_tx_queues(adapter, txo, i) {
1814 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1815 sizeof(struct be_eth_wrb));
1816 if (status)
1817 return status;
1818
1819 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1820 if (status)
1821 return status;
1822 }
1823
1824 return 0;
1825}
1826
1827static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001828{
1829 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001830 struct be_rx_obj *rxo;
1831 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832
Sathya Perla3abcded2010-10-03 22:12:27 -07001833 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001834 q = &rxo->cq;
1835 if (q->created)
1836 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1837 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001839}
1840
/* Decide the number of RX rings and create a completion queue for each.
 * Returns 0 or a negative error (partially created CQs are left for the
 * caller's cleanup path).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1874
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001875static irqreturn_t be_intx(int irq, void *dev)
1876{
1877 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001878 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001879
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001880 /* With INTx only one EQ is used */
1881 num_evts = event_handle(&adapter->eq_obj[0]);
1882 if (num_evts)
1883 return IRQ_HANDLED;
1884 else
1885 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001886}
1887
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001888static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001890 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001891
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001892 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001893 return IRQ_HANDLED;
1894}
1895
Sathya Perla2e588f82011-03-11 02:49:26 +00001896static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001897{
Sathya Perla2e588f82011-03-11 02:49:26 +00001898 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001899}
1900
/* NAPI poll work for one RX ring: process up to @budget completions,
 * ack them, and refill the rx ring if it runs low.  Returns the number
 * of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* replenish the ring when it drops below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1950
/* Reaps up to @budget TX completions for one TX ring (@txo at subqueue
 * @idx), releases the completed wrbs, and wakes the netdev subqueue if it
 * had been stopped for lack of wrbs and enough are now free.
 * Returns true when the CQ was fully drained (fewer than @budget compls
 * were found), false when more work may be pending.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Each compl may free multiple wrbs; accumulate the count */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00001983
/* NAPI poll handler for one event queue.
 * Services every TX ring and every RX ring that maps to this EQ (rings are
 * distributed across EQs by striding with num_evt_qs), and MCC completions
 * on the EQ that owns the MCC queue. Completes NAPI and re-arms the EQ when
 * all work fit within @budget; otherwise stays in polling mode.
 * Returns the amount of work done (NAPI contract).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* An undrained TXQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ for interrupts */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2020
/* Detects an Unrecoverable Error (UE) in the card and dumps diagnostics.
 * On Lancer chips the SLIPORT status/error registers are read from the
 * doorbell BAR; on BE chips the UE status words come from PCI config space
 * and are filtered through their mask registers. When an error is found,
 * ue_detected/eeh_err are latched (making subsequent calls no-ops) and
 * every set UE bit is logged by its descriptive name.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already in EEH state or error already reported - nothing to do */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Keep only the bits not masked off by the HW mask regs */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Walk the status words bit by bit and name each error found */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2084
Sathya Perla8d56ff12009-11-22 22:02:26 +00002085static void be_msix_disable(struct be_adapter *adapter)
2086{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002087 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002088 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002089 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002090 }
2091}
2092
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002093static uint be_num_rss_want(struct be_adapter *adapter)
2094{
2095 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2096 adapter->num_vfs == 0 && be_physfn(adapter) &&
2097 !be_is_mc(adapter))
2098 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2099 else
2100 return 0;
2101}
2102
/* Enables MSI-X, asking for one vector per desired event queue:
 * min(RSS queues wanted, online CPUs), but never fewer than
 * BE_MIN_MSIX_VECTORS. If the full request fails and pci_enable_msix()
 * reports a smaller achievable count (positive return), retry once with
 * that count. On success adapter->num_msix_vec is recorded; on failure it
 * is left unset, so msix_enabled() stays false and the irq-registration
 * path can use INTx instead.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Positive return = number of vectors actually available;
		 * retry with that reduced count */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2129
/* Enables SR-IOV when the driver was loaded with num_vfs > 0 and this
 * function is the PF. The requested VF count is capped at the device's
 * TotalVFs, the VFs are enabled via the PCI core, and the per-VF config
 * array is allocated.
 * Returns 0 on success (or when SR-IOV is not applicable/compiled out),
 * -ENOMEM if the vf_cfg allocation fails.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		/* NOTE(review): pos is not checked for 0 (capability absent)
		 * before being used as a config-space offset - presumably
		 * every supported device exposes the SR-IOV capability;
		 * worth confirming.
		 */
		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* Size by the clamped VF count (was the raw num_vfs
			 * module param, which could over-allocate) */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2165
/* Disables SR-IOV and frees the per-VF config array allocated by
 * be_sriov_enable(). No-op when SR-IOV was never enabled or
 * CONFIG_PCI_IOV is off.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}
2176
/* Returns the MSI-X vector (irq number) assigned to the given EQ,
 * indexed by the EQ's position in msix_entries.
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2182
/* Registers be_msix() on one MSI-X vector per event queue; each vector's
 * description is "<netdev name>-q<idx>". On failure, frees the vectors
 * registered so far (in reverse) and disables MSI-X so the caller can
 * fall back to INTx.
 * Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the irqs acquired before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2206
/* Registers the adapter's interrupt handler(s): per-EQ MSI-X vectors when
 * MSI-X is enabled, otherwise a shared INTx line (PF only - VFs cannot
 * use INTx). Sets adapter->isr_registered on success.
 * Returns 0 on success or the request_irq()/be_msix_register() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2234
/* Releases whatever irq resources be_irq_register() acquired: the shared
 * INTx line, or one irq per event queue under MSI-X. No-op if nothing was
 * registered; clears adapter->isr_registered when done.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2257
/* Destroys every RX ring: issues the FW destroy cmd for each created
 * queue, waits for in-flight DMA and the flush completion, drains leftover
 * completions from the CQ, then frees the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2278
/* ndo_stop handler: quiesces the interface.
 * Disables MCC async events and (on non-Lancer chips) the global
 * interrupt, then per EQ: stops NAPI, waits for any in-flight irq handler
 * and drains the EQ. Finally unregisters irqs, waits for pending TX
 * completions and tears down the RX rings. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is still running on this vector/line */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2309
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002310static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002311{
2312 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002313 int rc, i, j;
2314 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002315
2316 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002317 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2318 sizeof(struct be_eth_rx_d));
2319 if (rc)
2320 return rc;
2321 }
2322
2323 /* The FW would like the default RXQ to be created first */
2324 rxo = default_rxo(adapter);
2325 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2326 adapter->if_handle, false, &rxo->rss_id);
2327 if (rc)
2328 return rc;
2329
2330 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002331 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002332 rx_frag_size, adapter->if_handle,
2333 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002334 if (rc)
2335 return rc;
2336 }
2337
2338 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002339 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2340 for_all_rss_queues(adapter, rxo, i) {
2341 if ((j + i) >= 128)
2342 break;
2343 rsstable[j + i] = rxo->rss_id;
2344 }
2345 }
2346 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002347 if (rc)
2348 return rc;
2349 }
2350
2351 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002352 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002353 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002354 return 0;
2355}
2356
/* ndo_open handler: creates the RX rings, registers irqs, enables the
 * global interrupt on non-Lancer chips, arms every RX/TX CQ, enables MCC
 * async events, starts NAPI and arms every EQ, then queries and reports
 * the initial link state.
 * Returns 0 on success or -EIO after undoing the partial setup via
 * be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* A query failure here is not fatal; link events will follow */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2398
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002399static int be_setup_wol(struct be_adapter *adapter, bool enable)
2400{
2401 struct be_dma_mem cmd;
2402 int status = 0;
2403 u8 mac[ETH_ALEN];
2404
2405 memset(mac, 0, ETH_ALEN);
2406
2407 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002408 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2409 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002410 if (cmd.va == NULL)
2411 return -1;
2412 memset(cmd.va, 0, cmd.size);
2413
2414 if (enable) {
2415 status = pci_write_config_dword(adapter->pdev,
2416 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2417 if (status) {
2418 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002419 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002420 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2421 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002422 return status;
2423 }
2424 status = be_cmd_enable_magic_wol(adapter,
2425 adapter->netdev->dev_addr, &cmd);
2426 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2427 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2428 } else {
2429 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2430 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2431 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2432 }
2433
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002434 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002435 return status;
2436}
2437
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns 0 on success; on failure, the status of the last failing cmd
 * (the loop logs per-VF failures but continues with the remaining VFs).
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the mac-list cmd; BE programs a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						vf_cfg->if_handle,
						&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address: only the last octet is
		 * incremented (no carry into mac[4]) */
		mac[5] += 1;
	}
	return status;
}
2472
/* Undoes be_vf_setup() for every VF: removes the programmed MAC (via the
 * mac-list cmd on Lancer, pmac_del otherwise) and destroys the VF's
 * interface handle.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2488
/* Tears down everything be_setup() created: cancels the worker if it was
 * scheduled, clears VF state, deletes the extra unicast MACs (i starts at
 * 1 because pmac_id[0] is the primary MAC created with the interface),
 * destroys the interface and all queues, tells FW we're done with cmds,
 * disables MSI-X and frees the pmac_id array. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete the additional (uc_macs) unicast addresses */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}
2519
Sathya Perla30128032011-11-10 19:17:57 +00002520static void be_vf_setup_init(struct be_adapter *adapter)
2521{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002522 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002523 int vf;
2524
Sathya Perla11ac75e2011-12-13 00:58:50 +00002525 for_all_vfs(adapter, vf_cfg, vf) {
2526 vf_cfg->if_handle = -1;
2527 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002528 }
2529}
2530
/* Provisions every VF: creates an interface (untagged + broadcast +
 * multicast) per VF, programs each VF's MAC, records the link speed as
 * the VF's tx_rate (in units of 10 Mbps), and fetches the default vlan
 * from the hardware-switch config.
 * Returns 0 on success or the first failing cmd status.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		/* lnk_speed is in 10 Mbps units; tx_rate is in Mbps */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2570
Sathya Perla30128032011-11-10 19:17:57 +00002571static void be_setup_init(struct be_adapter *adapter)
2572{
2573 adapter->vlan_prio_bmap = 0xff;
2574 adapter->link_speed = -1;
2575 adapter->if_handle = -1;
2576 adapter->be3_native = false;
2577 adapter->promiscuous = false;
2578 adapter->eq_next_idx = 0;
2579}
2580
/* Obtains this function's MAC from the FW mac-list and makes it usable.
 * If FW reports the pmac_id as already active, the MAC bound to that id is
 * queried into @mac and the id is recorded as our primary pmac; otherwise
 * @mac is programmed as a new pmac on our interface.
 * Returns 0 on success (with @mac holding the address in use) or the
 * failing cmd status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
						&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}
2606
Sathya Perla5fb379e2009-06-18 00:02:59 +00002607static int be_setup(struct be_adapter *adapter)
2608{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002609 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002610 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002611 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002612 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002613 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002614
Sathya Perla30128032011-11-10 19:17:57 +00002615 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002616
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002617 be_cmd_req_native_mode(adapter);
2618
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002619 be_msix_enable(adapter);
2620
2621 status = be_evt_queues_create(adapter);
2622 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002623 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002624
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002625 status = be_tx_cqs_create(adapter);
2626 if (status)
2627 goto err;
2628
2629 status = be_rx_cqs_create(adapter);
2630 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002631 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002632
Sathya Perla5fb379e2009-06-18 00:02:59 +00002633 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002634 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002635 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002636
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002637 memset(mac, 0, ETH_ALEN);
2638 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002639 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002640 if (status)
2641 return status;
2642 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2643 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2644
2645 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2646 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2647 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002648 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2649
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002650 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2651 cap_flags |= BE_IF_FLAGS_RSS;
2652 en_flags |= BE_IF_FLAGS_RSS;
2653 }
2654 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2655 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002656 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002657 if (status != 0)
2658 goto err;
2659
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002660 /* The VF's permanent mac queried from card is incorrect.
2661 * For BEx: Query the mac configued by the PF using if_handle
2662 * For Lancer: Get and use mac_list to obtain mac address.
2663 */
2664 if (!be_physfn(adapter)) {
2665 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002666 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002667 else
2668 status = be_cmd_mac_addr_query(adapter, mac,
2669 MAC_ADDRESS_TYPE_NETWORK, false,
2670 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002671 if (!status) {
2672 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2673 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2674 }
2675 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002676
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002677 status = be_tx_qs_create(adapter);
2678 if (status)
2679 goto err;
2680
Sathya Perla04b71172011-09-27 13:30:27 -04002681 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002682
Sathya Perlaa54769f2011-10-24 02:45:00 +00002683 status = be_vid_config(adapter, false, 0);
2684 if (status)
2685 goto err;
2686
2687 be_set_rx_mode(adapter->netdev);
2688
2689 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002690 /* For Lancer: It is legal for this cmd to fail on VF */
2691 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002692 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002693
Sathya Perlaa54769f2011-10-24 02:45:00 +00002694 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2695 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2696 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002697 /* For Lancer: It is legal for this cmd to fail on VF */
2698 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002699 goto err;
2700 }
2701
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002702 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002703
Sathya Perla11ac75e2011-12-13 00:58:50 +00002704 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002705 status = be_vf_setup(adapter);
2706 if (status)
2707 goto err;
2708 }
2709
Sathya Perla191eb752012-02-23 18:50:13 +00002710 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2711 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2712
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002713 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002714err:
2715 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002716 return status;
2717}
2718
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netconsole/netpoll hook: poll every event queue with IRQs unavailable */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
2732
Ajit Khaparde84517482009-09-04 03:12:16 +00002733#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002734static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002735 const u8 *p, u32 img_start, int image_size,
2736 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002737{
2738 u32 crc_offset;
2739 u8 flashed_crc[4];
2740 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002741
2742 crc_offset = hdr_size + img_start + image_size - 4;
2743
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002744 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002745
2746 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002747 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002748 if (status) {
2749 dev_err(&adapter->pdev->dev,
2750 "could not get crc from flash, not flashing redboot\n");
2751 return false;
2752 }
2753
2754 /*update redboot only if crc does not match*/
2755 if (!memcmp(flashed_crc, p, 4))
2756 return false;
2757 else
2758 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002759}
2760
Sathya Perla306f1342011-08-02 19:57:45 +00002761static bool phy_flashing_required(struct be_adapter *adapter)
2762{
2763 int status = 0;
2764 struct be_phy_info phy_info;
2765
2766 status = be_cmd_get_phy_info(adapter, &phy_info);
2767 if (status)
2768 return false;
2769 if ((phy_info.phy_type == TN_8022) &&
2770 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2771 return true;
2772 }
2773 return false;
2774}
2775
/* Walk the per-generation component table and flash every applicable UFI
 * image component in 32KB chunks through the write_flashrom mailbox cmd.
 * @num_of_images: count of image headers that follow the file header
 * (0 for gen2 UFIs). Returns 0 on success, -1 on any flash failure or if
 * a component would read past the end of the firmware file.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Flash layout tables: { offset-in-file, component type, max size } */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NC-SI firmware needs controller FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY FW only on boards with a flashable external PHY */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Skip redboot when the on-flash CRC already matches */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* Point p at this component's payload within the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Intermediate chunks are SAVEd; the final chunk
			 * triggers the actual FLASH operation.
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW may legitimately reject PHY flashing;
				 * move on to the next component.
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2892
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002893static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2894{
2895 if (fhdr == NULL)
2896 return 0;
2897 if (fhdr->build[0] == '3')
2898 return BE_GEN3;
2899 else if (fhdr->build[0] == '2')
2900 return BE_GEN2;
2901 else
2902 return 0;
2903}
2904
/* Download firmware to a Lancer chip: stream the image in 32KB chunks to
 * the "/prg" object via write_object commands, then issue a zero-length
 * write at the final offset to commit. The image length must be 4-byte
 * aligned. Returns 0 on success or a negative errno / cmd status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus a full data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* Advance by what the FW actually consumed, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2983
/* Download firmware to a BE2/BE3 chip. Validates that the UFI file's
 * generation matches the adapter's, then flashes each image via
 * be_flash_data(). Gen3 UFIs carry multiple image headers; only images
 * with imageid == 1 are flashed. Returns 0 on success.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the flashrom cmd header plus one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3039
3040int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3041{
3042 const struct firmware *fw;
3043 int status;
3044
3045 if (!netif_running(adapter->netdev)) {
3046 dev_err(&adapter->pdev->dev,
3047 "Firmware load not allowed (interface is down)\n");
3048 return -1;
3049 }
3050
3051 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3052 if (status)
3053 goto fw_exit;
3054
3055 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3056
3057 if (lancer_chip(adapter))
3058 status = lancer_fw_download(adapter, fw);
3059 else
3060 status = be_fw_download(adapter, fw);
3061
Ajit Khaparde84517482009-09-04 03:12:16 +00003062fw_exit:
3063 release_firmware(fw);
3064 return status;
3065}
3066
/* net_device callbacks for the be2net driver; netpoll is compiled in only
 * when CONFIG_NET_POLL_CONTROLLER is set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3086
/* Initialize the net_device: advertise offload features, install the
 * netdev/ethtool ops and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-togglable offloads: scatter-gather, TSO, checksum, VLAN tx */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default, plus fixed VLAN rx/filter */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3118
3119static void be_unmap_pci_bars(struct be_adapter *adapter)
3120{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003121 if (adapter->csr)
3122 iounmap(adapter->csr);
3123 if (adapter->db)
3124 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003125}
3126
/* Map the PCI BARs this driver uses. Lancer exposes only a doorbell
 * region in BAR 0. On BE2/BE3 the PF maps the CSR region in BAR 2, and
 * the doorbell BAR number depends on generation and PF/VF:
 * gen2 -> 4, gen3 PF -> 4, gen3 VF -> 0.
 * Returns 0 on success, -ENOMEM on any mapping failure (after unmapping
 * anything already mapped).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3168
3169
3170static void be_ctrl_cleanup(struct be_adapter *adapter)
3171{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003172 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003173
3174 be_unmap_pci_bars(adapter);
3175
3176 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003177 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3178 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003179
Sathya Perla5b8821b2011-08-02 19:57:44 +00003180 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003181 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003182 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3183 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003184}
3185
/* Set up the low-level control path: map PCI BARs, allocate the 16-byte
 * aligned mailbox and the rx-filter DMA buffers, and initialize the
 * mailbox/MCC locks. Uses goto-based cleanup: each later failure unwinds
 * everything acquired before it. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox itself can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem_align is an aligned view into mbox_mem_alloc, not a
	 * separate allocation; only mbox_mem_alloc is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored on EEH/PCI error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3238
3239static void be_stats_cleanup(struct be_adapter *adapter)
3240{
Sathya Perla3abcded2010-10-03 22:12:27 -07003241 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003242
3243 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003244 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3245 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003246}
3247
3248static int be_stats_init(struct be_adapter *adapter)
3249{
Sathya Perla3abcded2010-10-03 22:12:27 -07003250 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003251
Selvin Xavier005d5692011-05-16 07:36:35 +00003252 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003253 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003254 } else {
3255 if (lancer_chip(adapter))
3256 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3257 else
3258 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3259 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003260 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3261 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003262 if (cmd->va == NULL)
3263 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003264 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003265 return 0;
3266}
3267
/* PCI remove callback: tear down in the reverse order of probe.
 * Unregister the netdev first so no new traffic or ioctls arrive, then
 * destroy queues, stats/ctrl resources, disable SR-IOV, release the PCI
 * device and finally free the netdev itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3291
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003292bool be_is_wol_supported(struct be_adapter *adapter)
3293{
3294 return ((adapter->wol_cap & BE_WOL_CAP) &&
3295 !be_is_wol_excluded(adapter)) ? true : false;
3296}
3297
/* Query firmware for the adapter's static configuration: port number,
 * function mode/caps, VLAN and pmac limits, controller attributes and
 * WOL capability. Allocates adapter->pmac_id (freed elsewhere on
 * teardown). Returns 0 or a negative errno / cmd status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* In FLEX10 (multi-channel) mode the VLAN table is shared 8 ways */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	/* Enable WOL by default when the hardware supports it */
	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	return 0;
}
3340
/* Determine the ASIC generation (and, for Lancer, the SLI family) from
 * the PCI device ID. For Lancer IDs the SLI_INTF register is validated
 * before use; returns -EINVAL if it is not valid, 0 otherwise.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		/* Lancer: sanity-check the SLI interface register */
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3376
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003377static int lancer_wait_ready(struct be_adapter *adapter)
3378{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003379#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003380 u32 sliport_status;
3381 int status = 0, i;
3382
3383 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3384 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3385 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3386 break;
3387
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003388 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003389 }
3390
3391 if (i == SLIPORT_READY_TIMEOUT)
3392 status = -1;
3393
3394 return status;
3395}
3396
3397static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3398{
3399 int status;
3400 u32 sliport_status, err, reset_needed;
3401 status = lancer_wait_ready(adapter);
3402 if (!status) {
3403 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3404 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3405 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3406 if (err && reset_needed) {
3407 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3408 adapter->db + SLIPORT_CONTROL_OFFSET);
3409
3410 /* check adapter has corrected the error */
3411 status = lancer_wait_ready(adapter);
3412 sliport_status = ioread32(adapter->db +
3413 SLIPORT_STATUS_OFFSET);
3414 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3415 SLIPORT_STATUS_RN_MASK);
3416 if (status || sliport_status)
3417 status = -1;
3418 } else if (err || reset_needed) {
3419 status = -1;
3420 }
3421 }
3422 return status;
3423}
3424
/* Detect a Lancer function-level error via the SLIPORT status register
 * and, if one is found, attempt a full driver-level recovery: reset the
 * port, tear down and rebuild the adapter, and re-open the netdev.
 * Called periodically from be_worker(); the recovery sequence below is
 * order-critical (close -> clear -> setup -> open).
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* skip if an EEH or unrecoverable-error path is already active */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		/* reset the port and wait for it to come back ready */
		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* previous fw timeouts were a symptom of the error state */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3473
/* Periodic (1s) housekeeping worker: checks for Lancer function errors
 * and UEs, reaps MCC completions when interrupts are not yet enabled,
 * kicks off an async stats request, replenishes starved RX queues and
 * updates adaptive EQ delays, then reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* fire a new stats command only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost buffers to RX queues that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3516
/* PCI probe: bring up one adapter instance end to end — enable the PCI
 * function, allocate the netdev, initialize control structures, sync
 * with firmware, set up queues and register the net device.
 * Failure unwinding uses the goto-cleanup chain at the bottom; each
 * label undoes exactly the steps that succeeded before the failure.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* determine BE2/BE3/Lancer generation from the device id */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit addressing */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer may come up in an error state; try to reset it to ready */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

	/* error unwinding: labels in reverse order of acquisition */
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3649
3650static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3651{
3652 struct be_adapter *adapter = pci_get_drvdata(pdev);
3653 struct net_device *netdev = adapter->netdev;
3654
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003655 if (adapter->wol)
3656 be_setup_wol(adapter, true);
3657
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003658 netif_device_detach(netdev);
3659 if (netif_running(netdev)) {
3660 rtnl_lock();
3661 be_close(netdev);
3662 rtnl_unlock();
3663 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003664 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003665
3666 pci_save_state(pdev);
3667 pci_disable_device(pdev);
3668 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3669 return 0;
3670}
3671
3672static int be_resume(struct pci_dev *pdev)
3673{
3674 int status = 0;
3675 struct be_adapter *adapter = pci_get_drvdata(pdev);
3676 struct net_device *netdev = adapter->netdev;
3677
3678 netif_device_detach(netdev);
3679
3680 status = pci_enable_device(pdev);
3681 if (status)
3682 return status;
3683
3684 pci_set_power_state(pdev, 0);
3685 pci_restore_state(pdev);
3686
Sathya Perla2243e2e2009-11-22 22:02:03 +00003687 /* tell fw we're ready to fire cmds */
3688 status = be_cmd_fw_init(adapter);
3689 if (status)
3690 return status;
3691
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003692 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003693 if (netif_running(netdev)) {
3694 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003695 be_open(netdev);
3696 rtnl_unlock();
3697 }
3698 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003699
3700 if (adapter->wol)
3701 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003702
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003703 return 0;
3704}
3705
Sathya Perla82456b02010-02-17 01:35:37 +00003706/*
3707 * An FLR will stop BE from DMAing any data.
3708 */
3709static void be_shutdown(struct pci_dev *pdev)
3710{
3711 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003712
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003713 if (!adapter)
3714 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003715
Sathya Perla0f4a6822011-03-21 20:49:28 +00003716 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003717
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003718 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003719
Sathya Perla82456b02010-02-17 01:35:37 +00003720 if (adapter->wol)
3721 be_setup_wol(adapter, true);
3722
Ajit Khaparde57841862011-04-06 18:08:43 +00003723 be_cmd_reset_function(adapter);
3724
Sathya Perla82456b02010-02-17 01:35:37 +00003725 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003726}
3727
/* EEH/AER error-detected callback: flag the error, quiesce the netdev
 * and tear down adapter resources so a later slot reset can rebuild.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* suppresses further hardware polling (see be_worker paths) */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3754
3755static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3756{
3757 struct be_adapter *adapter = pci_get_drvdata(pdev);
3758 int status;
3759
3760 dev_info(&adapter->pdev->dev, "EEH reset\n");
3761 adapter->eeh_err = false;
Sathya Perla6589ade2011-11-10 19:18:00 +00003762 adapter->ue_detected = false;
3763 adapter->fw_timeout = false;
Sathya Perlacf588472010-02-14 21:22:01 +00003764
3765 status = pci_enable_device(pdev);
3766 if (status)
3767 return PCI_ERS_RESULT_DISCONNECT;
3768
3769 pci_set_master(pdev);
3770 pci_set_power_state(pdev, 0);
3771 pci_restore_state(pdev);
3772
3773 /* Check if card is ok and fw is ready */
3774 status = be_cmd_POST(adapter);
3775 if (status)
3776 return PCI_ERS_RESULT_DISCONNECT;
3777
3778 return PCI_ERS_RESULT_RECOVERED;
3779}
3780
/* EEH resume callback: after a successful slot reset, re-sync with
 * firmware, rebuild adapter resources and re-attach the netdev.
 * Failures are only logged; there is no further recovery at this point.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3810
/* PCI error recovery (EEH/AER) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3816
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * EEH error handlers for all supported BE/OC device ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3827
3828static int __init be_init_module(void)
3829{
Joe Perches8e95a202009-12-03 07:58:21 +00003830 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3831 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003832 printk(KERN_WARNING DRV_NAME
3833 " : Module param rx_frag_size must be 2048/4096/8192."
3834 " Using 2048\n");
3835 rx_frag_size = 2048;
3836 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003837
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003838 return pci_register_driver(&be_driver);
3839}
3840module_init(be_init_module);
3841
/* Module exit: unregister the PCI driver (be_remove runs per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);