blob: 8bc9e125e0232e084030817d87696d5ce3e8d758 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
MODULE_VERSION(DRV_VER);
/* NOTE(review): MODULE_DEVICE_TABLE(pci, be_dev_ids) appears again right
 * after the be_dev_ids[] table definition below; one of the two looks
 * redundant — confirm before removing either.
 */
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

/* Number of PCI virtual functions to initialize (module parameter, read-only
 * via sysfs).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer used for received data (read-only). */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI device IDs this driver binds to; the list is zero-terminated. */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Description strings for the UE Status Low CSR; presumably indexed by bit
 * position in the register — verify against the code that decodes UE status.
 * Trailing spaces in some entries are kept as-is (they appear in log output).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Description strings for the UE Status High CSR; presumably indexed by bit
 * position — verify against the UE-status decoding code.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler.
 * Adds the new MAC as a pmac entry on the interface first and deletes the
 * old pmac only after the add succeeds, so the port is never left without
 * a programmed MAC. netdev->dev_addr is updated only on full success.
 * Returns 0 or a negative errno/FW status.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* remember old id for delete */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* read back the MAC currently programmed for this interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		/* add-new-then-delete-old ordering (see header comment) */
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) HW stats from the FW stats-cmd response into the
 * driver's adapter->drv_stats, converting from LE first. Port-level
 * counters come from the per-port rxf stats slot for this adapter's port.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 HW splits address/vlan mismatch drops; driver reports the sum */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port, not per port slot */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) HW stats from the FW stats-cmd response into
 * adapter->drv_stats, converting from LE first. Unlike v0, jabber and
 * address-mismatch counters are already per-port in the v1 layout.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer per-physical-port (pport) stats from the FW stats-cmd
 * response into adapter->drv_stats, converting from LE first. Several
 * Lancer counters are 64-bit; only their low words (*_lo) are used here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): both input-fifo and rxpp-fifo drops map to the same
	 * rx_fifo_overflow counter on Lancer — confirm this is intended.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue packet and byte
 * counters (read consistently via the u64_stats seqcount retry loop) and
 * derive the standard rtnl error counters from adapter->drv_stats.
 * Returns 'stats' for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent
		 * writer updating them (u64_stats seqcount protocol)
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Fill the TX header WRB for 'skb': sets CRC offload, LSO/checksum-offload
 * bits (with Lancer A0-specific checksum handling inside the GSO branch),
 * VLAN insertion, and the total WRB count and payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not set on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 silicon additionally needs explicit ip/tcp/udp
		 * checksum bits alongside LSO
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO checksum offload requested by the stack */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
Sathya Perla3c8def92011-06-12 20:01:58 +0000640static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
642{
Sathya Perla7101e112010-03-22 20:41:12 +0000643 dma_addr_t busaddr;
644 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000645 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647 struct be_eth_wrb *wrb;
648 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000649 bool map_single = false;
650 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 hdr = queue_head_node(txq);
653 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000654 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700655
David S. Millerebc8d2a2009-06-09 01:01:31 -0700656 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700657 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000658 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
659 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000660 goto dma_err;
661 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700662 wrb = queue_head_node(txq);
663 wrb_fill(wrb, busaddr, len);
664 be_dws_cpu_to_le(wrb, sizeof(*wrb));
665 queue_head_inc(txq);
666 copied += len;
667 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
David S. Millerebc8d2a2009-06-09 01:01:31 -0700669 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000670 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700671 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000672 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000673 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000674 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000675 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700676 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000677 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700678 be_dws_cpu_to_le(wrb, sizeof(*wrb));
679 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000680 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681 }
682
683 if (dummy_wrb) {
684 wrb = queue_head_node(txq);
685 wrb_fill(wrb, 0, 0);
686 be_dws_cpu_to_le(wrb, sizeof(*wrb));
687 queue_head_inc(txq);
688 }
689
Somnath Koturcc4ce022010-10-21 07:11:14 -0700690 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700691 be_dws_cpu_to_le(hdr, sizeof(*hdr));
692
693 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000694dma_err:
695 txq->head = map_head;
696 while (copied) {
697 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000698 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000699 map_single = false;
700 copied -= wrb->frag_len;
701 queue_head_inc(txq);
702 }
703 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700704}
705
Stephen Hemminger613573252009-08-31 19:50:58 +0000706static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700707 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708{
709 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000710 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
711 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700712 u32 wrb_cnt = 0, copied = 0;
713 u32 start = txq->head;
714 bool dummy_wrb, stopped = false;
715
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000716 /* For vlan tagged pkts, BE
717 * 1) calculates checksum even when CSO is not requested
718 * 2) calculates checksum wrongly for padded pkt less than
719 * 60 bytes long.
720 * As a workaround disable TX vlan offloading in such cases.
721 */
722 if (unlikely(vlan_tx_tag_present(skb) &&
723 (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
724 skb = skb_share_check(skb, GFP_ATOMIC);
725 if (unlikely(!skb))
726 goto tx_drop;
727
728 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
729 if (unlikely(!skb))
730 goto tx_drop;
731
732 skb->vlan_tci = 0;
733 }
734
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000735 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736
Sathya Perla3c8def92011-06-12 20:01:58 +0000737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000738 if (copied) {
739 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000740 BUG_ON(txo->sent_skb_list[start]);
741 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000743 /* Ensure txq has space for the next skb; Else stop the queue
744 * *BEFORE* ringing the tx doorbell, so that we serialze the
745 * tx compls of the current transmit which'll wake up the queue
746 */
Sathya Perla7101e112010-03-22 20:41:12 +0000747 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000748 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
749 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000750 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000751 stopped = true;
752 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700753
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000754 be_txq_notify(adapter, txq->id, wrb_cnt);
755
Sathya Perla3c8def92011-06-12 20:01:58 +0000756 be_tx_stats_update(txo, wrb_cnt, copied,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000757 skb_shinfo(skb)->gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000758 } else {
759 txq->head = start;
760 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700761 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000762tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763 return NETDEV_TX_OK;
764}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
784/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000785 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
786 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700787 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000788static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700789{
Sathya Perla11ac75e2011-12-13 00:58:50 +0000790 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791 u16 vtag[BE_NUM_VLANS_SUPPORTED];
792 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000793 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000794
795 if (vf) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000796 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
797 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
798 1, 1, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000799 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700800
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000801 /* No need to further configure vids if in promiscuous mode */
802 if (adapter->promiscuous)
803 return 0;
804
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000805 if (adapter->vlans_added > adapter->max_vlans)
806 goto set_vlan_promisc;
807
808 /* Construct VLAN Table to give to HW */
809 for (i = 0; i < VLAN_N_VID; i++)
810 if (adapter->vlan_tag[i])
811 vtag[ntags++] = cpu_to_le16(i);
812
813 status = be_cmd_vlan_config(adapter, adapter->if_handle,
814 vtag, ntags, 1, 0);
815
816 /* Set to VLAN promisc mode as setting VLAN filter failed */
817 if (status) {
818 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
819 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
820 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000822
Sathya Perlab31c50a2009-09-17 10:30:13 -0700823 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000824
825set_vlan_promisc:
826 status = be_cmd_vlan_config(adapter, adapter->if_handle,
827 NULL, 0, 1, 1);
828 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829}
830
Jiri Pirko8e586132011-12-08 19:52:37 -0500831static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832{
833 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000834 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000836 if (!be_physfn(adapter)) {
837 status = -EINVAL;
838 goto ret;
839 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000840
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000842 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000843 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500844
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000845 if (!status)
846 adapter->vlans_added++;
847 else
848 adapter->vlan_tag[vid] = 0;
849ret:
850 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851}
852
Jiri Pirko8e586132011-12-08 19:52:37 -0500853static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700854{
855 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000856 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700857
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000858 if (!be_physfn(adapter)) {
859 status = -EINVAL;
860 goto ret;
861 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000862
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000864 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000865 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500866
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000867 if (!status)
868 adapter->vlans_added--;
869 else
870 adapter->vlan_tag[vid] = 1;
871ret:
872 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700873}
874
Sathya Perlaa54769f2011-10-24 02:45:00 +0000875static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700876{
877 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000878 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700879
880 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000881 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000882 adapter->promiscuous = true;
883 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700884 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000885
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300886 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000887 if (adapter->promiscuous) {
888 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000889 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000890
891 if (adapter->vlans_added)
892 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000893 }
894
Sathya Perlae7b909a2009-11-22 22:01:10 +0000895 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000896 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000897 netdev_mc_count(netdev) > BE_MAX_MC) {
898 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000899 goto done;
900 }
901
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000902 if (netdev_uc_count(netdev) != adapter->uc_macs) {
903 struct netdev_hw_addr *ha;
904 int i = 1; /* First slot is claimed by the Primary MAC */
905
906 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
907 be_cmd_pmac_del(adapter, adapter->if_handle,
908 adapter->pmac_id[i], 0);
909 }
910
911 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
912 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
913 adapter->promiscuous = true;
914 goto done;
915 }
916
917 netdev_for_each_uc_addr(ha, adapter->netdev) {
918 adapter->uc_macs++; /* First slot is for Primary MAC */
919 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
920 adapter->if_handle,
921 &adapter->pmac_id[adapter->uc_macs], 0);
922 }
923 }
924
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000925 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
926
927 /* Set to MCAST promisc mode if setting MULTICAST address fails */
928 if (status) {
929 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
930 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
931 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
932 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000933done:
934 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700935}
936
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000937static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
938{
939 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000940 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000941 int status;
942
Sathya Perla11ac75e2011-12-13 00:58:50 +0000943 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000944 return -EPERM;
945
Sathya Perla11ac75e2011-12-13 00:58:50 +0000946 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000947 return -EINVAL;
948
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000949 if (lancer_chip(adapter)) {
950 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
951 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000952 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
953 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000954
Sathya Perla11ac75e2011-12-13 00:58:50 +0000955 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
956 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000957 }
958
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000959 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
961 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000962 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000964
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000965 return status;
966}
967
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968static int be_get_vf_config(struct net_device *netdev, int vf,
969 struct ifla_vf_info *vi)
970{
971 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000975 return -EPERM;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 return -EINVAL;
979
980 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000981 vi->tx_rate = vf_cfg->tx_rate;
982 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000983 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000984 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985
986 return 0;
987}
988
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989static int be_set_vf_vlan(struct net_device *netdev,
990 int vf, u16 vlan, u8 qos)
991{
992 struct be_adapter *adapter = netdev_priv(netdev);
993 int status = 0;
994
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000996 return -EPERM;
997
Sathya Perla11ac75e2011-12-13 00:58:50 +0000998 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000999 return -EINVAL;
1000
1001 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001002 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1003 /* If this is new value, program it. Else skip. */
1004 adapter->vf_cfg[vf].vlan_tag = vlan;
1005
1006 status = be_cmd_set_hsw_config(adapter, vlan,
1007 vf + 1, adapter->vf_cfg[vf].if_handle);
1008 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001009 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001010 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001011 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001012 vlan = adapter->vf_cfg[vf].def_vid;
1013 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1014 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001015 }
1016
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001017
1018 if (status)
1019 dev_info(&adapter->pdev->dev,
1020 "VLAN %d config on VF %d failed\n", vlan, vf);
1021 return status;
1022}
1023
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024static int be_set_vf_tx_rate(struct net_device *netdev,
1025 int vf, int rate)
1026{
1027 struct be_adapter *adapter = netdev_priv(netdev);
1028 int status = 0;
1029
Sathya Perla11ac75e2011-12-13 00:58:50 +00001030 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001031 return -EPERM;
1032
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001033 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034 return -EINVAL;
1035
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001036 if (rate < 100 || rate > 10000) {
1037 dev_err(&adapter->pdev->dev,
1038 "tx rate must be between 100 and 10000 Mbps\n");
1039 return -EINVAL;
1040 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041
Ajit Khaparde856c4012011-02-11 13:32:32 +00001042 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001043
1044 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001045 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001046 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001047 else
1048 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001049 return status;
1050}
1051
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001052static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001054 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001055 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001056 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001057 u64 pkts;
1058 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001059
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001060 if (!eqo->enable_aic) {
1061 eqd = eqo->eqd;
1062 goto modify_eqd;
1063 }
1064
1065 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001066 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001068 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1069
Sathya Perla4097f662009-03-24 16:40:13 -07001070 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001071 if (time_before(now, stats->rx_jiffies)) {
1072 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001073 return;
1074 }
1075
Sathya Perlaac124ff2011-07-25 19:10:14 +00001076 /* Update once a second */
1077 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001078 return;
1079
Sathya Perlaab1594e2011-07-25 19:10:15 +00001080 do {
1081 start = u64_stats_fetch_begin_bh(&stats->sync);
1082 pkts = stats->rx_pkts;
1083 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1084
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001085 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001086 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001087 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001088 eqd = (stats->rx_pps / 110000) << 3;
1089 eqd = min(eqd, eqo->max_eqd);
1090 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001091 if (eqd < 10)
1092 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001093
1094modify_eqd:
1095 if (eqd != eqo->cur_eqd) {
1096 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1097 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001098 }
Sathya Perla4097f662009-03-24 16:40:13 -07001099}
1100
Sathya Perla3abcded2010-10-03 22:12:27 -07001101static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001102 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001103{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001104 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001105
Sathya Perlaab1594e2011-07-25 19:10:15 +00001106 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001107 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001108 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001109 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001110 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001111 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001112 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001113 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001114 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001115}
1116
Sathya Perla2e588f82011-03-11 02:49:26 +00001117static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001118{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001119 /* L4 checksum is not reliable for non TCP/UDP packets.
1120 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001121 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1122 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001123}
1124
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001125static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1126 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001127{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001128 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001130 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001131
Sathya Perla3abcded2010-10-03 22:12:27 -07001132 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133 BUG_ON(!rx_page_info->page);
1134
Ajit Khaparde205859a2010-02-09 01:34:21 +00001135 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001136 dma_unmap_page(&adapter->pdev->dev,
1137 dma_unmap_addr(rx_page_info, bus),
1138 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001139 rx_page_info->last_page_user = false;
1140 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001141
1142 atomic_dec(&rxq->used);
1143 return rx_page_info;
1144}
1145
1146/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001147static void be_rx_compl_discard(struct be_rx_obj *rxo,
1148 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
Sathya Perla3abcded2010-10-03 22:12:27 -07001150 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001152 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001154 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001155 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001156 put_page(page_info->page);
1157 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001158 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159 }
1160}
1161
1162/*
1163 * skb_fill_rx_data forms a complete skb for an ether frame
1164 * indicated by rxcp.
1165 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001166static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1167 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168{
Sathya Perla3abcded2010-10-03 22:12:27 -07001169 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001171 u16 i, j;
1172 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173 u8 *start;
1174
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001175 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176 start = page_address(page_info->page) + page_info->page_offset;
1177 prefetch(start);
1178
1179 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001180 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001181
1182 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001183 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184 memcpy(skb->data, start, hdr_len);
1185 skb->len = curr_frag_len;
1186 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1187 /* Complete packet has now been moved to data */
1188 put_page(page_info->page);
1189 skb->data_len = 0;
1190 skb->tail += curr_frag_len;
1191 } else {
1192 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001193 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001194 skb_shinfo(skb)->frags[0].page_offset =
1195 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001196 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001197 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001198 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001199 skb->tail += hdr_len;
1200 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001201 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Sathya Perla2e588f82011-03-11 02:49:26 +00001203 if (rxcp->pkt_size <= rx_frag_size) {
1204 BUG_ON(rxcp->num_rcvd != 1);
1205 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001206 }
1207
1208 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001209 index_inc(&rxcp->rxq_idx, rxq->len);
1210 remaining = rxcp->pkt_size - curr_frag_len;
1211 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001212 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001213 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001214
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001215 /* Coalesce all frags from the same physical page in one slot */
1216 if (page_info->page_offset == 0) {
1217 /* Fresh page */
1218 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001219 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001220 skb_shinfo(skb)->frags[j].page_offset =
1221 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001222 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001223 skb_shinfo(skb)->nr_frags++;
1224 } else {
1225 put_page(page_info->page);
1226 }
1227
Eric Dumazet9e903e02011-10-18 21:00:24 +00001228 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001229 skb->len += curr_frag_len;
1230 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001231 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001232 remaining -= curr_frag_len;
1233 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001234 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001235 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001236 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001237}
1238
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001239/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001240static void be_rx_compl_process(struct be_rx_obj *rxo,
1241 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001242{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001243 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001244 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001245 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001246
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001247 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001248 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001249 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001250 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001251 return;
1252 }
1253
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001254 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001255
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001256 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001257 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001258 else
1259 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001260
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001261 skb->protocol = eth_type_trans(skb, netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001262 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001263 skb->rxhash = rxcp->rss_hash;
1264
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001265
Jiri Pirko343e43c2011-08-25 02:50:51 +00001266 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001267 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1268
1269 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001270}
1271
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001272/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001273void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1274 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001275{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001276 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001277 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001278 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001279 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001280 u16 remaining, curr_frag_len;
1281 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001282
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001283 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001284 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001285 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001286 return;
1287 }
1288
Sathya Perla2e588f82011-03-11 02:49:26 +00001289 remaining = rxcp->pkt_size;
1290 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001291 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001292
1293 curr_frag_len = min(remaining, rx_frag_size);
1294
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001295 /* Coalesce all frags from the same physical page in one slot */
1296 if (i == 0 || page_info->page_offset == 0) {
1297 /* First frag or Fresh page */
1298 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001299 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001300 skb_shinfo(skb)->frags[j].page_offset =
1301 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001302 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001303 } else {
1304 put_page(page_info->page);
1305 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001306 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001307 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001308 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001309 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001310 memset(page_info, 0, sizeof(*page_info));
1311 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001312 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001314 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 skb->len = rxcp->pkt_size;
1316 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001317 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001318 if (adapter->netdev->features & NETIF_F_RXHASH)
1319 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001320
Jiri Pirko343e43c2011-08-25 02:50:51 +00001321 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001322 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1323
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001324 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325}
1326
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001327static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1328 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001329{
Sathya Perla2e588f82011-03-11 02:49:26 +00001330 rxcp->pkt_size =
1331 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1332 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1333 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1334 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001335 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001336 rxcp->ip_csum =
1337 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1338 rxcp->l4_csum =
1339 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1340 rxcp->ipv6 =
1341 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1342 rxcp->rxq_idx =
1343 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1344 rxcp->num_rcvd =
1345 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1346 rxcp->pkt_type =
1347 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001348 rxcp->rss_hash =
1349 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001350 if (rxcp->vlanf) {
1351 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001352 compl);
1353 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1354 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001355 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001356 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001357}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001359static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1360 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001361{
1362 rxcp->pkt_size =
1363 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1364 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1365 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1366 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001367 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001368 rxcp->ip_csum =
1369 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1370 rxcp->l4_csum =
1371 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1372 rxcp->ipv6 =
1373 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1374 rxcp->rxq_idx =
1375 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1376 rxcp->num_rcvd =
1377 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1378 rxcp->pkt_type =
1379 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001380 rxcp->rss_hash =
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001382 if (rxcp->vlanf) {
1383 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001384 compl);
1385 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1386 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001387 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001388 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001389}
1390
/* Return the next valid RX completion from rxo->cq parsed into rxo->rxcp,
 * or NULL if no new completion is available.
 * Consumes the CQ entry: converts it to CPU order, clears its valid bit
 * and advances the CQ tail. May also clear a wrongly-set vlanf flag.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the
	 * DMA'd completion contents */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* be3_native selects the v1 completion layout */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer already reports the tag in CPU byte order */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan flag for the port-vlan (pvid) when that
		 * vlan is not explicitly configured on the interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1430
Eric Dumazet1829b082011-03-01 05:48:12 +00001431static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001434
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001436 gfp |= __GFP_COMP;
1437 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001438}
1439
1440/*
1441 * Allocate a page, split it to fragments of size rx_frag_size and post as
1442 * receive buffers to BE
1443 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001444static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445{
Sathya Perla3abcded2010-10-03 22:12:27 -07001446 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001447 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001448 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449 struct page *pagep = NULL;
1450 struct be_eth_rx_d *rxd;
1451 u64 page_dmaaddr = 0, frag_dmaaddr;
1452 u32 posted, page_offset = 0;
1453
Sathya Perla3abcded2010-10-03 22:12:27 -07001454 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1456 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001457 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001459 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460 break;
1461 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001462 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1463 0, adapter->big_page_size,
1464 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001465 page_info->page_offset = 0;
1466 } else {
1467 get_page(pagep);
1468 page_info->page_offset = page_offset + rx_frag_size;
1469 }
1470 page_offset = page_info->page_offset;
1471 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001472 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1474
1475 rxd = queue_head_node(rxq);
1476 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1477 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478
1479 /* Any space left in the current big page for another frag? */
1480 if ((page_offset + rx_frag_size + rx_frag_size) >
1481 adapter->big_page_size) {
1482 pagep = NULL;
1483 page_info->last_page_user = true;
1484 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001485
1486 prev_page_info = page_info;
1487 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001488 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489 }
1490 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001491 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492
1493 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001495 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001496 } else if (atomic_read(&rxq->used) == 0) {
1497 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001498 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001499 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500}
1501
Sathya Perla5fb379e2009-06-18 00:02:59 +00001502static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001503{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1505
1506 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1507 return NULL;
1508
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001509 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001510 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1511
1512 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1513
1514 queue_tail_inc(tx_cq);
1515 return txcp;
1516}
1517
Sathya Perla3c8def92011-06-12 20:01:58 +00001518static u16 be_tx_compl_process(struct be_adapter *adapter,
1519 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520{
Sathya Perla3c8def92011-06-12 20:01:58 +00001521 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001522 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001523 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001525 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1526 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001528 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001530 sent_skbs[txq->tail] = NULL;
1531
1532 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001533 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001534
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001535 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001537 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001538 unmap_tx_frag(&adapter->pdev->dev, wrb,
1539 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001540 unmap_skb_hdr = false;
1541
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 num_wrbs++;
1543 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001544 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001547 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001548}
1549
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001550/* Return the number of events in the event queue */
1551static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001552{
1553 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001554 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001555
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001556 do {
1557 eqe = queue_tail_node(&eqo->q);
1558 if (eqe->evt == 0)
1559 break;
1560
1561 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001562 eqe->evt = 0;
1563 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001564 queue_tail_inc(&eqo->q);
1565 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001566
1567 return num;
1568}
1569
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001571{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001572 bool rearm = false;
1573 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001574
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001575 /* Deal with any spurious interrupts that come without events */
1576 if (!num)
1577 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001578
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001579 if (num || msix_enabled(eqo->adapter))
1580 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1581
Sathya Perla859b1e42009-08-10 03:43:51 +00001582 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001583 napi_schedule(&eqo->napi);
1584
1585 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001586}
1587
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001588/* Leaves the EQ is disarmed state */
1589static void be_eq_clean(struct be_eq_obj *eqo)
1590{
1591 int num = events_get(eqo);
1592
1593 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1594}
1595
/* Flush the RX path for this ring: discard all pending completions,
 * then release every posted-but-unused RX buffer and reset the ring
 * indices. Called with the RX queue quiesced.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* 'tail' is computed as head minus the outstanding buffer count,
	 * modulo the ring length */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1620
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001621static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001622{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001623 struct be_tx_obj *txo;
1624 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001625 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001626 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001627 struct sk_buff *sent_skb;
1628 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001629 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630
Sathya Perlaa8e91792009-08-10 03:42:43 +00001631 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1632 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001633 pending_txqs = adapter->num_tx_qs;
1634
1635 for_all_tx_queues(adapter, txo, i) {
1636 txq = &txo->q;
1637 while ((txcp = be_tx_compl_get(&txo->cq))) {
1638 end_idx =
1639 AMAP_GET_BITS(struct amap_eth_tx_compl,
1640 wrb_index, txcp);
1641 num_wrbs += be_tx_compl_process(adapter, txo,
1642 end_idx);
1643 cmpl++;
1644 }
1645 if (cmpl) {
1646 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1647 atomic_sub(num_wrbs, &txq->used);
1648 cmpl = 0;
1649 num_wrbs = 0;
1650 }
1651 if (atomic_read(&txq->used) == 0)
1652 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001653 }
1654
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001655 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00001656 break;
1657
1658 mdelay(1);
1659 } while (true);
1660
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001661 for_all_tx_queues(adapter, txo, i) {
1662 txq = &txo->q;
1663 if (atomic_read(&txq->used))
1664 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1665 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001666
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001667 /* free posted tx for which compls will never arrive */
1668 while (atomic_read(&txq->used)) {
1669 sent_skb = txo->sent_skb_list[txq->tail];
1670 end_idx = txq->tail;
1671 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1672 &dummy_wrb);
1673 index_adv(&end_idx, num_wrbs - 1, txq->len);
1674 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1675 atomic_sub(num_wrbs, &txq->used);
1676 }
Sathya Perlab03388d2010-02-18 00:37:17 +00001677 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678}
1679
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001680static void be_evt_queues_destroy(struct be_adapter *adapter)
1681{
1682 struct be_eq_obj *eqo;
1683 int i;
1684
1685 for_all_evt_queues(adapter, eqo, i) {
1686 be_eq_clean(eqo);
1687 if (eqo->q.created)
1688 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1689 be_queue_free(adapter, &eqo->q);
1690 }
1691}
1692
1693static int be_evt_queues_create(struct be_adapter *adapter)
1694{
1695 struct be_queue_info *eq;
1696 struct be_eq_obj *eqo;
1697 int i, rc;
1698
1699 adapter->num_evt_qs = num_irqs(adapter);
1700
1701 for_all_evt_queues(adapter, eqo, i) {
1702 eqo->adapter = adapter;
1703 eqo->tx_budget = BE_TX_BUDGET;
1704 eqo->idx = i;
1705 eqo->max_eqd = BE_MAX_EQD;
1706 eqo->enable_aic = true;
1707
1708 eq = &eqo->q;
1709 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1710 sizeof(struct be_eq_entry));
1711 if (rc)
1712 return rc;
1713
1714 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1715 if (rc)
1716 return rc;
1717 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001718 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001719}
1720
Sathya Perla5fb379e2009-06-18 00:02:59 +00001721static void be_mcc_queues_destroy(struct be_adapter *adapter)
1722{
1723 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001724
Sathya Perla8788fdc2009-07-27 22:52:03 +00001725 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001726 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001727 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001728 be_queue_free(adapter, q);
1729
Sathya Perla8788fdc2009-07-27 22:52:03 +00001730 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001731 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001732 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001733 be_queue_free(adapter, q);
1734}
1735
1736/* Must be called only after TX qs are created as MCC shares TX EQ */
1737static int be_mcc_queues_create(struct be_adapter *adapter)
1738{
1739 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001740
Sathya Perla8788fdc2009-07-27 22:52:03 +00001741 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001742 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001743 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001744 goto err;
1745
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001746 /* Use the default EQ for MCC completions */
1747 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001748 goto mcc_cq_free;
1749
Sathya Perla8788fdc2009-07-27 22:52:03 +00001750 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001751 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1752 goto mcc_cq_destroy;
1753
Sathya Perla8788fdc2009-07-27 22:52:03 +00001754 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001755 goto mcc_q_free;
1756
1757 return 0;
1758
1759mcc_q_free:
1760 be_queue_free(adapter, q);
1761mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001762 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001763mcc_cq_free:
1764 be_queue_free(adapter, cq);
1765err:
1766 return -1;
1767}
1768
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769static void be_tx_queues_destroy(struct be_adapter *adapter)
1770{
1771 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001772 struct be_tx_obj *txo;
1773 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774
Sathya Perla3c8def92011-06-12 20:01:58 +00001775 for_all_tx_queues(adapter, txo, i) {
1776 q = &txo->q;
1777 if (q->created)
1778 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1779 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001780
Sathya Perla3c8def92011-06-12 20:01:58 +00001781 q = &txo->cq;
1782 if (q->created)
1783 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1784 be_queue_free(adapter, q);
1785 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001786}
1787
Sathya Perladafc0fe2011-10-24 02:45:02 +00001788static int be_num_txqs_want(struct be_adapter *adapter)
1789{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001790 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001791 lancer_chip(adapter) || !be_physfn(adapter) ||
1792 adapter->generation == BE_GEN2)
1793 return 1;
1794 else
1795 return MAX_TX_QS;
1796}
1797
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001798static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001799{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001800 struct be_queue_info *cq, *eq;
1801 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001802 struct be_tx_obj *txo;
1803 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001804
Sathya Perladafc0fe2011-10-24 02:45:02 +00001805 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001806 if (adapter->num_tx_qs != MAX_TX_QS) {
1807 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001808 netif_set_real_num_tx_queues(adapter->netdev,
1809 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001810 rtnl_unlock();
1811 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001812
Sathya Perla3c8def92011-06-12 20:01:58 +00001813 for_all_tx_queues(adapter, txo, i) {
1814 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001815 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1816 sizeof(struct be_eth_tx_compl));
1817 if (status)
1818 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001820 /* If num_evt_qs is less than num_tx_qs, then more than
1821 * one txq share an eq
1822 */
1823 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1824 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1825 if (status)
1826 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001827 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001828 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001829}
1830
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001831static int be_tx_qs_create(struct be_adapter *adapter)
1832{
1833 struct be_tx_obj *txo;
1834 int i, status;
1835
1836 for_all_tx_queues(adapter, txo, i) {
1837 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1838 sizeof(struct be_eth_wrb));
1839 if (status)
1840 return status;
1841
1842 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1843 if (status)
1844 return status;
1845 }
1846
1847 return 0;
1848}
1849
1850static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851{
1852 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001853 struct be_rx_obj *rxo;
1854 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855
Sathya Perla3abcded2010-10-03 22:12:27 -07001856 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001857 q = &rxo->cq;
1858 if (q->created)
1859 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1860 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862}
1863
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001864static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001865{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001866 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001867 struct be_rx_obj *rxo;
1868 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001870 /* We'll create as many RSS rings as there are irqs.
1871 * But when there's only one irq there's no use creating RSS rings
1872 */
1873 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1874 num_irqs(adapter) + 1 : 1;
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001875
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001877 for_all_rx_queues(adapter, rxo, i) {
1878 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07001879 cq = &rxo->cq;
1880 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1881 sizeof(struct be_eth_rx_compl));
1882 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001883 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001884
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001885 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1886 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07001887 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001888 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07001889 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001890
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001891 if (adapter->num_rx_qs != MAX_RX_QS)
1892 dev_info(&adapter->pdev->dev,
1893 "Created only %d receive queues", adapter->num_rx_qs);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001894
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001895 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001896}
1897
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898static irqreturn_t be_intx(int irq, void *dev)
1899{
1900 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001901 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001903 /* With INTx only one EQ is used */
1904 num_evts = event_handle(&adapter->eq_obj[0]);
1905 if (num_evts)
1906 return IRQ_HANDLED;
1907 else
1908 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909}
1910
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001911static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001913 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001915 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916 return IRQ_HANDLED;
1917}
1918
Sathya Perla2e588f82011-03-11 02:49:26 +00001919static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001920{
Sathya Perla2e588f82011-03-11 02:49:26 +00001921 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922}
1923
/* Service up to @budget RX completions on this RX object's completion
 * queue.  Valid frames are handed to the stack via GRO or the regular
 * receive path; flush/partial/mis-filtered completions are discarded.
 * If any work was done the CQ is re-armed and the RX ring replenished
 * when it has drained below the watermark.
 * Returns the number of completions consumed (< @budget => CQ drained).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* re-arm the CQ and credit back the consumed entries */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RX ring if it has drained below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1973
/* Reap up to @budget TX completions for the given TX object.
 * Frees the wrbs of completed sends, re-arms the TX CQ, credits the
 * freed wrbs back to the TX queue and wakes the netdev subqueue @idx
 * if it had been stopped for lack of wrbs.
 * Returns true when fewer than @budget completions were found, i.e.
 * the TX CQ is drained.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002006
/* NAPI poll handler.  Services every TX and RX queue mapped to this
 * EQ (queues are distributed across EQs in steps of num_evt_qs), plus
 * MCC completions on the EQ that owns the MCC queue.  When all work
 * fits in @budget, NAPI is completed and the EQ re-armed; otherwise
 * events are counted/cleared and polling continues.
 * Returns the amount of work done (== @budget keeps polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* TX not drained: force re-poll */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2043
/* Detect and report an unrecoverable error (UE) in the adapter.
 * Lancer chips expose error state through the SLIPORT status/error
 * registers in BAR space; BE chips expose UE status words in PCI
 * config space, which are filtered through their mask registers
 * (masked bits are not treated as errors).  When an error is found
 * the adapter is marked dead (ue_detected/eeh_err) and each set UE
 * bit / SLIPORT register is logged.  No-op if an error was already
 * recorded.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* ignore bits the mask registers mark as expected */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2107
Sathya Perla8d56ff12009-11-22 22:02:26 +00002108static void be_msix_disable(struct be_adapter *adapter)
2109{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002110 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002111 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002112 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002113 }
2114}
2115
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002116static uint be_num_rss_want(struct be_adapter *adapter)
2117{
2118 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2119 adapter->num_vfs == 0 && be_physfn(adapter) &&
2120 !be_is_mc(adapter))
2121 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2122 else
2123 return 0;
2124}
2125
/* Try to enable MSI-X with one vector per wanted RSS queue, capped by
 * the number of online CPUs and at least BE_MIN_MSIX_VECTORS.  If the
 * full request fails but pci_enable_msix() reports a smaller usable
 * count (positive return value), retry with exactly that count.  On
 * success adapter->num_msix_vec is set; on failure it stays 0 and the
 * driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the vector count the PCI core can
		 * actually provide; retry with that */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2152
/* Enable SR-IOV on the PF when the num_vfs module parameter requests
 * it.  The request is clamped to the VF count the device advertises in
 * its SR-IOV capability.  On success adapter->num_vfs is set and a
 * per-VF config array is allocated; if pci_enable_sriov() fails, VFs
 * stay disabled (num_vfs = 0) and 0 is returned.
 * Returns 0 on success or -ENOMEM if the config array allocation fails.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* fix: size the array by the clamped VF count,
			 * not by the (possibly larger) module parameter */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2188
/* Undo be_sriov_enable(): disable SR-IOV, free the per-VF config
 * array and zero the VF count.  No-op when SR-IOV is not enabled.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!sriov_enabled(adapter))
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
#endif
}
2199
/* Map an event-queue object to its MSI-X vector: the EQ at index i
 * owns msix_entries[i].
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2205
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-qN".
 * On failure the IRQs requested so far are freed in reverse order and
 * MSI-X is disabled so the caller can fall back to INTx.
 * Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind only the IRQs successfully requested (i-1 .. 0) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2229
2230static int be_irq_register(struct be_adapter *adapter)
2231{
2232 struct net_device *netdev = adapter->netdev;
2233 int status;
2234
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002235 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002236 status = be_msix_register(adapter);
2237 if (status == 0)
2238 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002239 /* INTx is not supported for VF */
2240 if (!be_physfn(adapter))
2241 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002242 }
2243
2244 /* INTx */
2245 netdev->irq = adapter->pdev->irq;
2246 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2247 adapter);
2248 if (status) {
2249 dev_err(&adapter->pdev->dev,
2250 "INTx request IRQ failed - err %d\n", status);
2251 return status;
2252 }
2253done:
2254 adapter->isr_registered = true;
2255 return 0;
2256}
2257
2258static void be_irq_unregister(struct be_adapter *adapter)
2259{
2260 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002261 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002262 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002263
2264 if (!adapter->isr_registered)
2265 return;
2266
2267 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002268 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002269 free_irq(netdev->irq, adapter);
2270 goto done;
2271 }
2272
2273 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002274 for_all_evt_queues(adapter, eqo, i)
2275 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002276
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277done:
2278 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279}
2280
/* Destroy every RX queue.  Created queues are torn down via FW cmd; a
 * 1ms grace delay lets in-flight DMA finish and the flush completion
 * arrive before the CQ is drained, and then the queue memory is freed.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2301
/* ndo_stop: quiesce the interface.  Ordering matters here: stop async
 * MCC events, mask the chip interrupt (non-Lancer), disable NAPI and
 * drain each EQ with its IRQ synchronized, release IRQs, wait out the
 * pending TX completions so all tx skbs are freed, then destroy the
 * RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2332
/* Allocate and create all RX queues.  The FW wants the default
 * (non-RSS) RXQ created first; the remaining queues are created with
 * RSS enabled and their rss_ids are written round-robin into the
 * 128-entry RSS indirection table.  Finally every ring is filled with
 * receive buffers.  Returns 0 or the first error from allocation/FW.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* spread the RSS queue ids across the indirection table;
		 * num_rx_qs - 1 excludes the default (non-RSS) queue */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2379
/* ndo_open: bring the interface up.  Creates the RX queues, registers
 * IRQs, unmasks the chip interrupt (non-Lancer), arms all RX/TX CQs,
 * enables async MCC events and NAPI on every EQ, and finally queries
 * and reports the current link state.  On RXQ-creation failure the
 * partially initialized state is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* link state is reported asynchronously from here on; seed it
	 * with one synchronous query */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2421
/* Configure Wake-on-LAN magic-packet support.
 * @enable: true programs the netdev MAC as the magic-packet filter and
 * enables PCI PME wake from D3hot/D3cold; false clears the filter (an
 * all-zero MAC) and disables wake.  A DMA-coherent buffer holds the FW
 * command payload and is freed on every path.
 * Returns 0 on success, -1 if the DMA buffer cannot be allocated, or
 * the PCI/FW error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		/* enable PME generation before programming the filter */
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* an all-zero MAC clears the magic-packet filter */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2460
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the mac-list cmd; BE adds a
		 * pmac entry on the VF's interface */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						vf_cfg->if_handle,
						&vf_cfg->pmac_id, vf + 1);
		}

		/* on failure, keep configuring the remaining VFs; the last
		 * error is what gets returned */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;	/* next VF gets seed+1, seed+2, ... */
	}
	return status;
}
2495
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002496static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002497{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002498 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002499 u32 vf;
2500
Sathya Perla11ac75e2011-12-13 00:58:50 +00002501 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002502 if (lancer_chip(adapter))
2503 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2504 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002505 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2506 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002507
Sathya Perla11ac75e2011-12-13 00:58:50 +00002508 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2509 }
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002510}
2511
/* Tear down everything be_setup() created: stop the delayed worker,
 * undo VF provisioning, delete the extra unicast MACs (pmac_id[0] is
 * the primary MAC and is owned by the interface itself), destroy the
 * interface and all MCC/RX/TX/EQ resources, tell the FW we are done
 * issuing cmds, release MSI-X vectors and free the pmac_id array.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* uc-mac entries start after the primary MAC at [0] */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}
2542
Sathya Perla30128032011-11-10 19:17:57 +00002543static void be_vf_setup_init(struct be_adapter *adapter)
2544{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002545 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002546 int vf;
2547
Sathya Perla11ac75e2011-12-13 00:58:50 +00002548 for_all_vfs(adapter, vf_cfg, vf) {
2549 vf_cfg->if_handle = -1;
2550 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002551 }
2552}
2553
/* Per-VF provisioning done by the PF: create an interface for each VF
 * (untagged + broadcast + multicast), program the VF MAC addresses,
 * record each VF's link speed as its initial tx_rate, and fetch its
 * default VLAN from the FW switch config.
 * Returns 0 on success or the first FW error encountered.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		/* NOTE(review): the *10 scaling suggests lnk_speed is in
		 * 10 Mbps units — confirm against be_cmd_link_status_query */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2593
Sathya Perla30128032011-11-10 19:17:57 +00002594static void be_setup_init(struct be_adapter *adapter)
2595{
2596 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002597 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002598 adapter->if_handle = -1;
2599 adapter->be3_native = false;
2600 adapter->promiscuous = false;
2601 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002602 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002603}
2604
/* Establish the primary MAC via the FW "get mac from list" cmd.  If
 * the FW reports the returned pmac_id as already active, query the MAC
 * bound to it and adopt that id; otherwise add @mac as a new pmac
 * entry.  On success adapter->pmac_id[0] holds the primary MAC's id.
 * Returns 0 or the FW error status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
				&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		/* reuse the already-programmed entry */
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}
2630
Sathya Perla5fb379e2009-06-18 00:02:59 +00002631static int be_setup(struct be_adapter *adapter)
2632{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002633 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002634 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002635 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002636 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002637 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002638
Sathya Perla30128032011-11-10 19:17:57 +00002639 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002640
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002641 be_cmd_req_native_mode(adapter);
2642
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002643 be_msix_enable(adapter);
2644
2645 status = be_evt_queues_create(adapter);
2646 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002647 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002648
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002649 status = be_tx_cqs_create(adapter);
2650 if (status)
2651 goto err;
2652
2653 status = be_rx_cqs_create(adapter);
2654 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002655 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002656
Sathya Perla5fb379e2009-06-18 00:02:59 +00002657 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002658 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002659 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002660
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002661 memset(mac, 0, ETH_ALEN);
2662 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002663 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002664 if (status)
2665 return status;
2666 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2667 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2668
2669 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2670 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2671 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002672 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2673
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002674 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2675 cap_flags |= BE_IF_FLAGS_RSS;
2676 en_flags |= BE_IF_FLAGS_RSS;
2677 }
2678 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2679 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002680 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002681 if (status != 0)
2682 goto err;
2683
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002684 /* The VF's permanent mac queried from card is incorrect.
2685 * For BEx: Query the mac configued by the PF using if_handle
2686 * For Lancer: Get and use mac_list to obtain mac address.
2687 */
2688 if (!be_physfn(adapter)) {
2689 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002690 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002691 else
2692 status = be_cmd_mac_addr_query(adapter, mac,
2693 MAC_ADDRESS_TYPE_NETWORK, false,
2694 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002695 if (!status) {
2696 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2697 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2698 }
2699 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002700
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002701 status = be_tx_qs_create(adapter);
2702 if (status)
2703 goto err;
2704
Sathya Perla04b71172011-09-27 13:30:27 -04002705 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002706
Sathya Perlaa54769f2011-10-24 02:45:00 +00002707 status = be_vid_config(adapter, false, 0);
2708 if (status)
2709 goto err;
2710
2711 be_set_rx_mode(adapter->netdev);
2712
2713 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002714 /* For Lancer: It is legal for this cmd to fail on VF */
2715 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002716 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002717
Sathya Perlaa54769f2011-10-24 02:45:00 +00002718 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2719 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2720 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002721 /* For Lancer: It is legal for this cmd to fail on VF */
2722 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002723 goto err;
2724 }
2725
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002726 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002727
Sathya Perla11ac75e2011-12-13 00:58:50 +00002728 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002729 status = be_vf_setup(adapter);
2730 if (status)
2731 goto err;
2732 }
2733
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002734 be_cmd_get_phy_info(adapter);
2735 if (be_pause_supported(adapter))
2736 adapter->phy.fc_autoneg = 1;
2737
Sathya Perla191eb752012-02-23 18:50:13 +00002738 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2739 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2740
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002741 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002742err:
2743 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002744 return status;
2745}
2746
Ivan Vecera66268732011-12-08 01:31:21 +00002747#ifdef CONFIG_NET_POLL_CONTROLLER
2748static void be_netpoll(struct net_device *netdev)
2749{
2750 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002751 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002752 int i;
2753
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002754 for_all_evt_queues(adapter, eqo, i)
2755 event_handle(eqo);
2756
2757 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002758}
2759#endif
2760
Ajit Khaparde84517482009-09-04 03:12:16 +00002761#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002762static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002763 const u8 *p, u32 img_start, int image_size,
2764 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002765{
2766 u32 crc_offset;
2767 u8 flashed_crc[4];
2768 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002769
2770 crc_offset = hdr_size + img_start + image_size - 4;
2771
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002772 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002773
2774 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002775 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002776 if (status) {
2777 dev_err(&adapter->pdev->dev,
2778 "could not get crc from flash, not flashing redboot\n");
2779 return false;
2780 }
2781
2782 /*update redboot only if crc does not match*/
2783 if (!memcmp(flashed_crc, p, 4))
2784 return false;
2785 else
2786 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002787}
2788
Sathya Perla306f1342011-08-02 19:57:45 +00002789static bool phy_flashing_required(struct be_adapter *adapter)
2790{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002791 return (adapter->phy.phy_type == TN_8022 &&
2792 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002793}
2794
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002795static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002796 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002797 struct be_dma_mem *flash_cmd, int num_of_images)
2798
Ajit Khaparde84517482009-09-04 03:12:16 +00002799{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002800 int status = 0, i, filehdr_size = 0;
2801 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002802 int num_bytes;
2803 const u8 *p = fw->data;
2804 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002805 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002806 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002807
Sathya Perla306f1342011-08-02 19:57:45 +00002808 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002809 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2810 FLASH_IMAGE_MAX_SIZE_g3},
2811 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2812 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2813 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2814 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2815 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2816 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2817 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2818 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2819 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2820 FLASH_IMAGE_MAX_SIZE_g3},
2821 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2822 FLASH_IMAGE_MAX_SIZE_g3},
2823 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002824 FLASH_IMAGE_MAX_SIZE_g3},
2825 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002826 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2827 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2828 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002829 };
Joe Perches215faf92010-12-21 02:16:10 -08002830 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002831 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2832 FLASH_IMAGE_MAX_SIZE_g2},
2833 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2834 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2835 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2836 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2837 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2838 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2839 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2840 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2841 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2842 FLASH_IMAGE_MAX_SIZE_g2},
2843 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2844 FLASH_IMAGE_MAX_SIZE_g2},
2845 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2846 FLASH_IMAGE_MAX_SIZE_g2}
2847 };
2848
2849 if (adapter->generation == BE_GEN3) {
2850 pflashcomp = gen3_flash_types;
2851 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002852 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002853 } else {
2854 pflashcomp = gen2_flash_types;
2855 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002856 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002857 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002858 for (i = 0; i < num_comp; i++) {
2859 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2860 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2861 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002862 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2863 if (!phy_flashing_required(adapter))
2864 continue;
2865 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002866 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2867 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002868 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2869 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002870 continue;
2871 p = fw->data;
2872 p += filehdr_size + pflashcomp[i].offset
2873 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002874 if (p + pflashcomp[i].size > fw->data + fw->size)
2875 return -1;
2876 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002877 while (total_bytes) {
2878 if (total_bytes > 32*1024)
2879 num_bytes = 32*1024;
2880 else
2881 num_bytes = total_bytes;
2882 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002883 if (!total_bytes) {
2884 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2885 flash_op = FLASHROM_OPER_PHY_FLASH;
2886 else
2887 flash_op = FLASHROM_OPER_FLASH;
2888 } else {
2889 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2890 flash_op = FLASHROM_OPER_PHY_SAVE;
2891 else
2892 flash_op = FLASHROM_OPER_SAVE;
2893 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002894 memcpy(req->params.data_buf, p, num_bytes);
2895 p += num_bytes;
2896 status = be_cmd_write_flashrom(adapter, flash_cmd,
2897 pflashcomp[i].optype, flash_op, num_bytes);
2898 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002899 if ((status == ILLEGAL_IOCTL_REQ) &&
2900 (pflashcomp[i].optype ==
2901 IMG_TYPE_PHY_FW))
2902 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002903 dev_err(&adapter->pdev->dev,
2904 "cmd to write to flash rom failed.\n");
2905 return -1;
2906 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002907 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002908 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002909 return 0;
2910}
2911
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002912static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2913{
2914 if (fhdr == NULL)
2915 return 0;
2916 if (fhdr->build[0] == '3')
2917 return BE_GEN3;
2918 else if (fhdr->build[0] == '2')
2919 return BE_GEN2;
2920 else
2921 return 0;
2922}
2923
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002924static int lancer_fw_download(struct be_adapter *adapter,
2925 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002926{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002927#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2928#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2929 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002930 const u8 *data_ptr = NULL;
2931 u8 *dest_image_ptr = NULL;
2932 size_t image_size = 0;
2933 u32 chunk_size = 0;
2934 u32 data_written = 0;
2935 u32 offset = 0;
2936 int status = 0;
2937 u8 add_status = 0;
2938
2939 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2940 dev_err(&adapter->pdev->dev,
2941 "FW Image not properly aligned. "
2942 "Length must be 4 byte aligned.\n");
2943 status = -EINVAL;
2944 goto lancer_fw_exit;
2945 }
2946
2947 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2948 + LANCER_FW_DOWNLOAD_CHUNK;
2949 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2950 &flash_cmd.dma, GFP_KERNEL);
2951 if (!flash_cmd.va) {
2952 status = -ENOMEM;
2953 dev_err(&adapter->pdev->dev,
2954 "Memory allocation failure while flashing\n");
2955 goto lancer_fw_exit;
2956 }
2957
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002958 dest_image_ptr = flash_cmd.va +
2959 sizeof(struct lancer_cmd_req_write_object);
2960 image_size = fw->size;
2961 data_ptr = fw->data;
2962
2963 while (image_size) {
2964 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2965
2966 /* Copy the image chunk content. */
2967 memcpy(dest_image_ptr, data_ptr, chunk_size);
2968
2969 status = lancer_cmd_write_object(adapter, &flash_cmd,
2970 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2971 &data_written, &add_status);
2972
2973 if (status)
2974 break;
2975
2976 offset += data_written;
2977 data_ptr += data_written;
2978 image_size -= data_written;
2979 }
2980
2981 if (!status) {
2982 /* Commit the FW written */
2983 status = lancer_cmd_write_object(adapter, &flash_cmd,
2984 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2985 &data_written, &add_status);
2986 }
2987
2988 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2989 flash_cmd.dma);
2990 if (status) {
2991 dev_err(&adapter->pdev->dev,
2992 "Firmware load error. "
2993 "Status code: 0x%x Additional Status: 0x%x\n",
2994 status, add_status);
2995 goto lancer_fw_exit;
2996 }
2997
2998 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2999lancer_fw_exit:
3000 return status;
3001}
3002
/* Download a firmware image to a BE2/BE3 chip: validate that the UFI file
 * generation matches the adapter generation, then flash each section via
 * be_flash_data().  For gen3 UFIs, iterate the trailing image headers and
 * flash only entries with imageid == 1.
 * Returns 0 on success, -1 or -ENOMEM on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* the g2 header prefix is enough to read the build string */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer: write_flashrom command header + one 32KB chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		/* UFI file generation does not match this adapter */
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3058
3059int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3060{
3061 const struct firmware *fw;
3062 int status;
3063
3064 if (!netif_running(adapter->netdev)) {
3065 dev_err(&adapter->pdev->dev,
3066 "Firmware load not allowed (interface is down)\n");
3067 return -1;
3068 }
3069
3070 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3071 if (status)
3072 goto fw_exit;
3073
3074 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3075
3076 if (lancer_chip(adapter))
3077 status = lancer_fw_download(adapter, fw);
3078 else
3079 status = be_fw_download(adapter, fw);
3080
Ajit Khaparde84517482009-09-04 03:12:16 +00003081fw_exit:
3082 release_firmware(fw);
3083 return status;
3084}
3085
stephen hemmingere5686ad2012-01-05 19:10:25 +00003086static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003087 .ndo_open = be_open,
3088 .ndo_stop = be_close,
3089 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003090 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003091 .ndo_set_mac_address = be_mac_addr_set,
3092 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003093 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003094 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003095 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3096 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003097 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003098 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003099 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003100 .ndo_get_vf_config = be_get_vf_config,
3101#ifdef CONFIG_NET_POLL_CONTROLLER
3102 .ndo_poll_controller = be_netpoll,
3103#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003104};
3105
/* Initialize the net_device: offload feature flags, netdev ops, ethtool
 * ops, and one NAPI context per event queue.
 * Note the ordering: netdev->features is derived from hw_features, so the
 * conditional RXHASH bit must be set on hw_features first.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-togglable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* currently-enabled features = all togglable ones + fixed VLAN rx */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* offloads also usable on VLAN devices stacked on this one */
	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3137
3138static void be_unmap_pci_bars(struct be_adapter *adapter)
3139{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003140 if (adapter->csr)
3141 iounmap(adapter->csr);
3142 if (adapter->db)
3143 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003144}
3145
3146static int be_map_pci_bars(struct be_adapter *adapter)
3147{
3148 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003149 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003150
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003151 if (lancer_chip(adapter)) {
3152 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3153 pci_resource_len(adapter->pdev, 0));
3154 if (addr == NULL)
3155 return -ENOMEM;
3156 adapter->db = addr;
3157 return 0;
3158 }
3159
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003160 if (be_physfn(adapter)) {
3161 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3162 pci_resource_len(adapter->pdev, 2));
3163 if (addr == NULL)
3164 return -ENOMEM;
3165 adapter->csr = addr;
3166 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003167
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003168 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003169 db_reg = 4;
3170 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003171 if (be_physfn(adapter))
3172 db_reg = 4;
3173 else
3174 db_reg = 0;
3175 }
3176 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3177 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003178 if (addr == NULL)
3179 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003180 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003181
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003182 return 0;
3183pci_map_err:
3184 be_unmap_pci_bars(adapter);
3185 return -ENOMEM;
3186}
3187
3188
3189static void be_ctrl_cleanup(struct be_adapter *adapter)
3190{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003191 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003192
3193 be_unmap_pci_bars(adapter);
3194
3195 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003196 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3197 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003198
Sathya Perla5b8821b2011-08-02 19:57:44 +00003199 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003200 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003201 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3202 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003203}
3204
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003205static int be_ctrl_init(struct be_adapter *adapter)
3206{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003207 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3208 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003209 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003210 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003211
3212 status = be_map_pci_bars(adapter);
3213 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003214 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003215
3216 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003217 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3218 mbox_mem_alloc->size,
3219 &mbox_mem_alloc->dma,
3220 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003221 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003222 status = -ENOMEM;
3223 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003224 }
3225 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3226 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3227 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3228 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003229
Sathya Perla5b8821b2011-08-02 19:57:44 +00003230 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3231 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3232 &rx_filter->dma, GFP_KERNEL);
3233 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003234 status = -ENOMEM;
3235 goto free_mbox;
3236 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003237 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003238
Ivan Vecera29849612010-12-14 05:43:19 +00003239 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003240 spin_lock_init(&adapter->mcc_lock);
3241 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003242
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003243 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003244 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003245 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003246
3247free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003248 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3249 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003250
3251unmap_pci_bars:
3252 be_unmap_pci_bars(adapter);
3253
3254done:
3255 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003256}
3257
3258static void be_stats_cleanup(struct be_adapter *adapter)
3259{
Sathya Perla3abcded2010-10-03 22:12:27 -07003260 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003261
3262 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003263 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3264 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003265}
3266
3267static int be_stats_init(struct be_adapter *adapter)
3268{
Sathya Perla3abcded2010-10-03 22:12:27 -07003269 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003270
Selvin Xavier005d5692011-05-16 07:36:35 +00003271 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003272 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003273 } else {
3274 if (lancer_chip(adapter))
3275 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3276 else
3277 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3278 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003279 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3280 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003281 if (cmd->va == NULL)
3282 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003283 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003284 return 0;
3285}
3286
/* PCI remove callback: tear down in reverse order of probe -- unregister
 * the netdev (so the stack stops using the device) before destroying
 * queues and freeing control-path resources.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3310
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003311bool be_is_wol_supported(struct be_adapter *adapter)
3312{
3313 return ((adapter->wol_cap & BE_WOL_CAP) &&
3314 !be_is_wol_excluded(adapter)) ? true : false;
3315}
3316
Sathya Perla2243e2e2009-11-22 22:02:03 +00003317static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003318{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003319 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003320
Sathya Perla3abcded2010-10-03 22:12:27 -07003321 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3322 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003323 if (status)
3324 return status;
3325
Sathya Perla752961a2011-10-24 02:45:03 +00003326 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde456d9c92012-03-18 06:23:31 +00003327 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde82903e42010-02-09 01:34:57 +00003328 else
3329 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3330
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003331 if (be_physfn(adapter))
3332 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3333 else
3334 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3335
3336 /* primary mac needs 1 pmac entry */
3337 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3338 sizeof(u32), GFP_KERNEL);
3339 if (!adapter->pmac_id)
3340 return -ENOMEM;
3341
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003342 status = be_cmd_get_cntl_attributes(adapter);
3343 if (status)
3344 return status;
3345
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003346 status = be_cmd_get_acpi_wol_cap(adapter);
3347 if (status) {
3348 /* in case of a failure to get wol capabillities
3349 * check the exclusion list to determine WOL capability */
3350 if (!be_is_wol_excluded(adapter))
3351 adapter->wol_cap |= BE_WOL_CAP;
3352 }
3353
3354 if (be_is_wol_supported(adapter))
3355 adapter->wol = true;
3356
Sathya Perla2243e2e2009-11-22 22:02:03 +00003357 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003358}
3359
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003360static int be_dev_family_check(struct be_adapter *adapter)
3361{
3362 struct pci_dev *pdev = adapter->pdev;
3363 u32 sli_intf = 0, if_type;
3364
3365 switch (pdev->device) {
3366 case BE_DEVICE_ID1:
3367 case OC_DEVICE_ID1:
3368 adapter->generation = BE_GEN2;
3369 break;
3370 case BE_DEVICE_ID2:
3371 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003372 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003373 adapter->generation = BE_GEN3;
3374 break;
3375 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003376 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003377 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3378 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3379 SLI_INTF_IF_TYPE_SHIFT;
3380
3381 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3382 if_type != 0x02) {
3383 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3384 return -EINVAL;
3385 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003386 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3387 SLI_INTF_FAMILY_SHIFT);
3388 adapter->generation = BE_GEN3;
3389 break;
3390 default:
3391 adapter->generation = 0;
3392 }
3393 return 0;
3394}
3395
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003396static int lancer_wait_ready(struct be_adapter *adapter)
3397{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003398#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003399 u32 sliport_status;
3400 int status = 0, i;
3401
3402 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3403 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3404 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3405 break;
3406
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003407 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003408 }
3409
3410 if (i == SLIPORT_READY_TIMEOUT)
3411 status = -1;
3412
3413 return status;
3414}
3415
/* Wait for the Lancer SLIPORT to become ready.  If the port reports both
 * the error (ERR) and reset-needed (RN) bits, request a port reset via
 * the SLIPORT control register and wait for the port to come back clean.
 * Returns 0 when the port ends up healthy and ready, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* kick off the firmware-side reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* still in error or still needing reset: give up */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* only one of ERR/RN set -- not recoverable here */
			status = -1;
		}
	}
	return status;
}
3443
/* Lancer only: if the SLIPORT status register reports an error, attempt a
 * full function recovery -- reset the port back to ready state, tear down
 * and rebuild the adapter, and re-open the interface if it was running.
 * Runs from be_worker context; skipped while an EEH error or a detected
 * UE is already being handled.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
				"Adapter in error state."
				"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		/* keep the stack off the device while we rebuild it */
		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear the stale timeout flag before re-issuing fw cmds */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
				"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
			"Adapter error recovery failed\n");
}
3492
/* Periodic (1s) housekeeping work item: Lancer error-recovery check, UE
 * detection, firmware stats refresh, replenishing starved RX rings and
 * EQ delay update.  Re-arms itself at the end of every pass.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* issue a fresh async stats request unless one is still in
	 * flight (stats_cmd_sent is presumably cleared on completion
	 * elsewhere -- not visible in this chunk) */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost RX buffers on rings that ran dry in the RX path */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3535
/* PCI probe: bring up one BE/Lancer function end to end -- PCI enable,
 * netdev allocation, DMA mask selection, SR-IOV, control path init,
 * firmware synchronization, stats, queue setup and netdev registration.
 * Any failure unwinds in reverse order through the labelled cleanup chain.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer must be brought to a ready state (recovering it if
	 * necessary) before any further commands are issued */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;	/* flow control on by default */

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

	/* error unwind: each label undoes the step acquired just above
	 * the corresponding goto */
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3668
/* Legacy PM suspend hook: arm WOL if enabled, detach and close the
 * netdev, tear down adapter resources, then put the PCI device into the
 * requested sleep state.  Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close is called under RTNL, matching dev_close paths */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3690
/* Legacy PM resume hook: re-enable the PCI device, restore its state,
 * re-init firmware, rebuild the adapter and re-open/attach the netdev;
 * finally disarm WOL if it was armed at suspend.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * in be_probe/be_eeh_resume -- confirm this is intentional */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3724
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* shutdown can be invoked before probe finished successfully */
	if (!adapter)
		return;

	/* stop the periodic worker before touching the device */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset halts any in-flight DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3746
/* EEH callback: a PCI channel error was detected.  Detach and close the
 * netdev, tear down adapter resources, and tell the EEH core whether a
 * slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag suppresses other hw-touching paths during recovery
	 * (checked e.g. in lancer_test_and_recover_fn_err) */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point requesting a slot reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3773
/* EEH callback: the slot has been reset.  Clear the error flags,
 * re-enable and restore the PCI device, and verify the firmware is back
 * (POST) before reporting the slot as recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* recovery starts from a clean error state */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3799
/* EEH callback: traffic may flow again.  Re-initialize firmware, rebuild
 * the adapter, and re-open/attach the netdev if it was running.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3829
/* PCI error-recovery (EEH) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3835
/* PCI driver glue: probe/remove, legacy suspend/resume PM hooks,
 * shutdown and EEH error handling */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3846
3847static int __init be_init_module(void)
3848{
Joe Perches8e95a202009-12-03 07:58:21 +00003849 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3850 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003851 printk(KERN_WARNING DRV_NAME
3852 " : Module param rx_frag_size must be 2048/4096/8192."
3853 " Using 2048\n");
3854 rx_frag_size = 2048;
3855 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003856
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003857 return pci_register_driver(&be_driver);
3858}
3859module_init(be_init_module);
3860
/* Module teardown: unregister the driver (triggers be_remove per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);