blob: a9a11d426f08744fe9ee5cd0fc97ea5ad9d02823 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI device IDs claimed by this driver: BE2/BE3 (ServerEngines vendor ID)
 * and OneConnect/Lancer variants (Emulex vendor ID).
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Bit-position -> hardware-block name for the UE (unrecoverable error)
 * status-low register. Index equals bit number. Some entries carry
 * historical trailing spaces; they are kept byte-for-byte.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position -> hardware-block name for the UE status-high register.
 * Bits 24..31 have no documented block and are reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new MAC for this interface.
 * The FW command sequence is order-sensitive: query the currently
 * programmed MAC, add the new pmac entry first, and only then delete the
 * old one (so the port is never left without a valid MAC).
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac id; adapter->pmac_id[0] is overwritten by
	 * be_cmd_pmac_add() below */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* only touch the hw tables if the MAC actually changes */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		/* new MAC is active; drop the old pmac entry */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the BE2 (v0-format) hw stats response into the driver's
 * chip-independent drv_stats structure. The hw buffer is converted from
 * little-endian in place before the fields are read.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatches separately; fold both into
	 * the single driver counter */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are reported per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the BE3 (v1-format) hw stats response into the driver's
 * chip-independent drv_stats structure. Same pattern as
 * populate_be2_stats() but with the v1 layout's extra/renamed fields.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* v1 already folds vlan mismatches into this counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* unlike v0, jabber events are already per-port here */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy the Lancer per-physical-port (pport) stats response into the
 * driver's chip-independent drv_stats structure. Lancer reports 64-bit
 * counters; only the low 32 bits (_lo fields) are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single fifo-overflow counter; it feeds both driver
	 * fifo-drop counters */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
/* Parse the last FW stats response into drv_stats, dispatching on chip
 * generation (BE2 v0 / BE3 v1 / Lancer pport formats), then fold the
 * per-RQ "no fragments" erx counters into the per-queue rx stats.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
433
/* ndo_get_stats64 handler: aggregate the per-queue soft counters (read
 * consistently via the u64_stats seqcount loops) and the FW-derived
 * drv_stats error counters into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the tx side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
499
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000500void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700502 struct net_device *netdev = adapter->netdev;
503
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000504 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000505 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000508
509 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510 netif_carrier_on(netdev);
511 else
512 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513}
514
Sathya Perla3c8def92011-06-12 20:01:58 +0000515static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000516 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700517{
Sathya Perla3c8def92011-06-12 20:01:58 +0000518 struct be_tx_stats *stats = tx_stats(txo);
519
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 stats->tx_reqs++;
522 stats->tx_wrbs += wrb_cnt;
523 stats->tx_bytes += copied;
524 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700525 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000527 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700528}
529
530/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000531static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700534 int cnt = (skb->len > skb->data_len);
535
536 cnt += skb_shinfo(skb)->nr_frags;
537
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 /* to account for hdr wrb */
539 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000540 if (lancer_chip(adapter) || !(cnt & 1)) {
541 *dummy = false;
542 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* add a dummy to make it an even num */
544 cnt++;
545 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000546 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700547 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548 return cnt;
549}
550
551static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
552{
553 wrb->frag_pa_hi = upper_32_bits(addr);
554 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
555 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
556}
557
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000558static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559 struct sk_buff *skb)
560{
561 u8 vlan_prio;
562 u16 vlan_tag;
563
564 vlan_tag = vlan_tx_tag_get(skb);
565 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566 /* If vlan priority provided by OS is NOT in available bmap */
567 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569 adapter->recommended_prio;
570
571 return vlan_tag;
572}
573
/* Program the tx header wrb for @skb: CRC, LSO/checksum-offload and vlan
 * bits plus the total wrb count and payload length, via the AMAP bit-field
 * accessors.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is not set for Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 silicon additionally needs explicit ip/l4
		 * checksum bits alongside LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO hw checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
617
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000618static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000619 bool unmap_single)
620{
621 dma_addr_t dma;
622
623 be_dws_le_to_cpu(wrb, sizeof(*wrb));
624
625 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000626 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000627 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000628 dma_unmap_single(dev, dma, wrb->frag_len,
629 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000631 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000632 }
633}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700634
/* Fill TX WRBs for @skb: one header WRB, one data WRB per mapped piece
 * (linear head + page frags), plus an optional zero-length dummy WRB.
 * Returns the number of payload bytes queued, or 0 if a DMA mapping
 * failed — in which case every mapping made so far is undone and the
 * queue head is rewound past the header slot.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for the error-unwind path */
	map_head = txq->head;

	/* Map the linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment into its own data WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length WRB when the caller asked for one */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and walk forward, unmapping what
	 * was mapped; only the first WRB used dma_map_single, hence
	 * map_single is cleared after the first iteration */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
700
/* ndo_start_xmit handler: build WRBs for @skb on its mapped TX queue and
 * ring the doorbell. Always returns NETDEV_TX_OK; on failure the skb is
 * dropped (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		/* Inserting the tag in-line requires a writable skb */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		/* Tag is now in the frame; clear it so HW won't re-insert */
		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
760
761static int be_change_mtu(struct net_device *netdev, int new_mtu)
762{
763 struct be_adapter *adapter = netdev_priv(netdev);
764 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000765 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
766 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700767 dev_info(&adapter->pdev->dev,
768 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000769 BE_MIN_MTU,
770 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 return -EINVAL;
772 }
773 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
774 netdev->mtu, new_mtu);
775 netdev->mtu = new_mtu;
776 return 0;
777}
778
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * When @vf is true, additionally programs the single vlan_tag of
 * vf_cfg[@vf_num] on that VF's interface. The PF's full VLAN table is
 * then (re)programmed unless the interface is in promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		/* NOTE(review): this status is overwritten below by the
		 * PF table programming unless we return early — confirm a
		 * VF config failure is intentionally not reported */
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vtag[ntags++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vtag, ntags, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* Fall back: accept all VLANs in HW (last arg enables vlan-promisc) */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				NULL, 0, 1, 1);
	return status;
}
825
/* ndo_vlan_rx_add_vid handler: mark @vid in the shadow table and push the
 * updated VLAN table to HW. On HW failure the shadow entry is rolled back.
 * Only the physical function may program VLAN filters.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): bound is max_vlans + 1 here while be_vlan_rem_vid
	 * uses max_vlans; be_vid_config() itself falls back to vlan-promisc
	 * when over the limit — confirm the asymmetry is intentional */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back shadow entry */
ret:
	return status;
}
847
/* ndo_vlan_rx_kill_vid handler: clear @vid in the shadow table and push the
 * updated VLAN table to HW. On HW failure the shadow entry is restored.
 * Only the physical function may program VLAN filters.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	/* Only reprogram HW when we were within the filter limit; beyond it
	 * the device is already in vlan-promisc mode */
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* restore shadow entry */
ret:
	return status;
}
869
/* ndo_set_rx_mode handler: synchronize the HW RX filters (promisc,
 * multicast, unicast MAC list) with the netdev's flags and address lists.
 * Falls back to promiscuous / all-multi modes when HW filter capacity
 * is exceeded or programming fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters dropped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Resync the unicast MAC list only when it changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete every previously programmed secondary MAC */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addrs than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
931
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000932static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
933{
934 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000935 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000936 int status;
937
Sathya Perla11ac75e2011-12-13 00:58:50 +0000938 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000939 return -EPERM;
940
Sathya Perla11ac75e2011-12-13 00:58:50 +0000941 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000942 return -EINVAL;
943
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000944 if (lancer_chip(adapter)) {
945 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
946 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000947 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
948 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000949
Sathya Perla11ac75e2011-12-13 00:58:50 +0000950 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
951 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000952 }
953
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000954 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000955 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
956 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000957 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000958 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960 return status;
961}
962
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000963static int be_get_vf_config(struct net_device *netdev, int vf,
964 struct ifla_vf_info *vi)
965{
966 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000970 return -EPERM;
971
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973 return -EINVAL;
974
975 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000976 vi->tx_rate = vf_cfg->tx_rate;
977 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000979 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000980
981 return 0;
982}
983
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000984static int be_set_vf_vlan(struct net_device *netdev,
985 int vf, u16 vlan, u8 qos)
986{
987 struct be_adapter *adapter = netdev_priv(netdev);
988 int status = 0;
989
Sathya Perla11ac75e2011-12-13 00:58:50 +0000990 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000991 return -EPERM;
992
Sathya Perla11ac75e2011-12-13 00:58:50 +0000993 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000994 return -EINVAL;
995
996 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000997 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
998 /* If this is new value, program it. Else skip. */
999 adapter->vf_cfg[vf].vlan_tag = vlan;
1000
1001 status = be_cmd_set_hsw_config(adapter, vlan,
1002 vf + 1, adapter->vf_cfg[vf].if_handle);
1003 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001004 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001005 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001006 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001007 vlan = adapter->vf_cfg[vf].def_vid;
1008 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1009 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001010 }
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012
1013 if (status)
1014 dev_info(&adapter->pdev->dev,
1015 "VLAN %d config on VF %d failed\n", vlan, vf);
1016 return status;
1017}
1018
Ajit Khapardee1d18732010-07-23 01:52:13 +00001019static int be_set_vf_tx_rate(struct net_device *netdev,
1020 int vf, int rate)
1021{
1022 struct be_adapter *adapter = netdev_priv(netdev);
1023 int status = 0;
1024
Sathya Perla11ac75e2011-12-13 00:58:50 +00001025 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001026 return -EPERM;
1027
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001028 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001029 return -EINVAL;
1030
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001031 if (rate < 100 || rate > 10000) {
1032 dev_err(&adapter->pdev->dev,
1033 "tx rate must be between 100 and 10000 Mbps\n");
1034 return -EINVAL;
1035 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001036
Ajit Khaparde856c4012011-02-11 13:32:32 +00001037 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001038
1039 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001040 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001042 else
1043 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001044 return status;
1045}
1046
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001047static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001049 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001050 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001051 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001052 u64 pkts;
1053 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001054
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001055 if (!eqo->enable_aic) {
1056 eqd = eqo->eqd;
1057 goto modify_eqd;
1058 }
1059
1060 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001061 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001063 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1064
Sathya Perla4097f662009-03-24 16:40:13 -07001065 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001066 if (time_before(now, stats->rx_jiffies)) {
1067 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001068 return;
1069 }
1070
Sathya Perlaac124ff2011-07-25 19:10:14 +00001071 /* Update once a second */
1072 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001073 return;
1074
Sathya Perlaab1594e2011-07-25 19:10:15 +00001075 do {
1076 start = u64_stats_fetch_begin_bh(&stats->sync);
1077 pkts = stats->rx_pkts;
1078 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1079
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001080 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001081 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001082 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001083 eqd = (stats->rx_pps / 110000) << 3;
1084 eqd = min(eqd, eqo->max_eqd);
1085 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001086 if (eqd < 10)
1087 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001088
1089modify_eqd:
1090 if (eqd != eqo->cur_eqd) {
1091 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1092 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001093 }
Sathya Perla4097f662009-03-24 16:40:13 -07001094}
1095
Sathya Perla3abcded2010-10-03 22:12:27 -07001096static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001097 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001098{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001099 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001100
Sathya Perlaab1594e2011-07-25 19:10:15 +00001101 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001102 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001103 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001104 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001105 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001106 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001107 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001108 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001109 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001110}
1111
Sathya Perla2e588f82011-03-11 02:49:26 +00001112static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001113{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001114 /* L4 checksum is not reliable for non TCP/UDP packets.
1115 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001116 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1117 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001118}
1119
/* Fetch the page-info entry for RX queue slot @frag_idx and account the
 * slot as consumed. The backing page is DMA-unmapped only when this slot
 * is the page's last user (pages are shared across fragment slots).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		/* Last slot referencing this page: release the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1140
1141/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001142static void be_rx_compl_discard(struct be_rx_obj *rxo,
1143 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001144{
Sathya Perla3abcded2010-10-03 22:12:27 -07001145 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001147 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001149 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001150 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001151 put_page(page_info->page);
1152 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001153 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001154 }
1155}
1156
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment's header portion is copied into the skb's linear
 * area; the rest is attached as page frags. Fragments that share a
 * physical page (page_offset != 0) are coalesced into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Remainder of the first fragment becomes page frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1233
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the RX fragments, applies checksum /
 * RSS-hash / VLAN metadata and hands it to the stack. On skb allocation
 * failure the completion's fragments are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only when RXCSUM is enabled and
	 * the completion flags say it is reliable */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1266
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb (via napi_get_frags) out of the posted RX page
 * fragments referenced by the completion and hands it to the GRO engine.
 * Consumes rxcp->num_rcvd fragments from the RX queue.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the completion and free its frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j indexes the skb frag slot currently being filled; -1 = none yet */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: the slot already holds a
			 * reference, so drop this fragment's extra page ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		/* Mark the page_info slot free for be_post_rx_frags() */
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for error-free TCP compls (see do_gro()),
	 * so checksum has been verified by HW */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1321
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001322static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1323 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324{
Sathya Perla2e588f82011-03-11 02:49:26 +00001325 rxcp->pkt_size =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1327 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1328 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1329 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001330 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001331 rxcp->ip_csum =
1332 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1333 rxcp->l4_csum =
1334 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1335 rxcp->ipv6 =
1336 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1337 rxcp->rxq_idx =
1338 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1339 rxcp->num_rcvd =
1340 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1341 rxcp->pkt_type =
1342 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001343 rxcp->rss_hash =
1344 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001345 if (rxcp->vlanf) {
1346 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001347 compl);
1348 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1349 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001350 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001351 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001352}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001354static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1355 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001356{
1357 rxcp->pkt_size =
1358 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1359 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1360 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1361 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001362 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001363 rxcp->ip_csum =
1364 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1365 rxcp->l4_csum =
1366 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1367 rxcp->ipv6 =
1368 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1369 rxcp->rxq_idx =
1370 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1371 rxcp->num_rcvd =
1372 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1373 rxcp->pkt_type =
1374 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001375 rxcp->rss_hash =
1376 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001377 if (rxcp->vlanf) {
1378 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001379 compl);
1380 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1381 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001382 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001383 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001384}
1385
/* Return the next valid RX completion from the CQ, parsed into rxo->rxcp,
 * or NULL if none is pending. Consumes the entry (clears its valid bit and
 * advances the CQ tail); the caller must eventually notify the CQ.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the completion only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE delivers the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the vlan tag when it equals the port's pvid and that
		 * vid was not configured by the stack */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1425
Eric Dumazet1829b082011-03-01 05:48:12 +00001426static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001428 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001429
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001431 gfp |= __GFP_COMP;
1432 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433}
1434
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post until MAX_RX_POST frags are queued or we hit a slot that is
	 * still in use (page != NULL) */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and DMA-map it once */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * each frag holds its own page reference */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_page_user marks the frag that unmaps the page */
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1496
/* Return the next valid TX completion from the CQ, or NULL if none is
 * pending. Consumes the entry (clears its valid bit, advances the tail)
 * and byte-swaps it to CPU endianness in place.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the completion only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so the entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1512
/* Unmap and free the skb whose WRBs span from the TXQ tail up to and
 * including last_index (taken from a TX completion). Returns the number
 * of WRBs consumed (including the header wrb) so the caller can adjust
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may map the skb's linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1544
/* Return the number of events in the event queue.
 * Consumes each valid entry by zeroing its evt word and advancing the
 * EQ tail; stops at the first entry with evt == 0 (not yet written by HW).
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the evt read against the clear/advance below */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1564
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001565static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001566{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001567 bool rearm = false;
1568 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001569
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570 /* Deal with any spurious interrupts that come without events */
1571 if (!num)
1572 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001573
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001574 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001575 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001576 napi_schedule(&eqo->napi);
1577
1578 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001579}
1580
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001581/* Leaves the EQ is disarmed state */
1582static void be_eq_clean(struct be_eq_obj *eqo)
1583{
1584 int num = events_get(eqo);
1585
1586 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1587}
1588
/* Drain an RX object on teardown: discard all pending completions, then
 * release every posted-but-unused RX buffer and reset the ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = oldest posted-but-unconsumed slot = head - used (mod len) */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info drops rxq->used as a side effect */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1613
/* Drain all TX queues on teardown. Waits up to ~200ms for outstanding TX
 * completions to arrive and processes them; any skbs still posted after
 * the timeout are force-freed (their completions will never arrive).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the processed compls and release the
				 * wrbs; reset counters for the next txq */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb so it can be
			 * unmapped/freed as if a completion had arrived */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1672
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001673static void be_evt_queues_destroy(struct be_adapter *adapter)
1674{
1675 struct be_eq_obj *eqo;
1676 int i;
1677
1678 for_all_evt_queues(adapter, eqo, i) {
1679 be_eq_clean(eqo);
1680 if (eqo->q.created)
1681 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1682 be_queue_free(adapter, &eqo->q);
1683 }
1684}
1685
/* Create one event queue per available irq and initialize the per-EQ
 * software state (budget, adaptive interrupt coalescing, etc).
 * Returns 0 on success or a negative error; on failure the caller is
 * expected to clean up via be_evt_queues_destroy() — TODO confirm.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1713
Sathya Perla5fb379e2009-06-18 00:02:59 +00001714static void be_mcc_queues_destroy(struct be_adapter *adapter)
1715{
1716 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001717
Sathya Perla8788fdc2009-07-27 22:52:03 +00001718 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001719 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001720 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001721 be_queue_free(adapter, q);
1722
Sathya Perla8788fdc2009-07-27 22:52:03 +00001723 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001724 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001725 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001726 be_queue_free(adapter, q);
1727}
1728
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC WRB queue, unwinding
 * partially created resources via the goto chain on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1761
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762static void be_tx_queues_destroy(struct be_adapter *adapter)
1763{
1764 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001765 struct be_tx_obj *txo;
1766 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767
Sathya Perla3c8def92011-06-12 20:01:58 +00001768 for_all_tx_queues(adapter, txo, i) {
1769 q = &txo->q;
1770 if (q->created)
1771 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1772 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001773
Sathya Perla3c8def92011-06-12 20:01:58 +00001774 q = &txo->cq;
1775 if (q->created)
1776 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1777 be_queue_free(adapter, q);
1778 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001779}
1780
Sathya Perladafc0fe2011-10-24 02:45:02 +00001781static int be_num_txqs_want(struct be_adapter *adapter)
1782{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001783 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001784 lancer_chip(adapter) || !be_physfn(adapter) ||
1785 adapter->generation == BE_GEN2)
1786 return 1;
1787 else
1788 return MAX_TX_QS;
1789}
1790
/* Create a completion queue for each TX queue and bind it to an EQ.
 * Also tells the stack the real number of TX queues in use (under rtnl).
 * Returns 0 on success or a negative error.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl here */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1823
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001824static int be_tx_qs_create(struct be_adapter *adapter)
1825{
1826 struct be_tx_obj *txo;
1827 int i, status;
1828
1829 for_all_tx_queues(adapter, txo, i) {
1830 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1831 sizeof(struct be_eth_wrb));
1832 if (status)
1833 return status;
1834
1835 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1836 if (status)
1837 return status;
1838 }
1839
1840 return 0;
1841}
1842
1843static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844{
1845 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001846 struct be_rx_obj *rxo;
1847 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001848
Sathya Perla3abcded2010-10-03 22:12:27 -07001849 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001850 q = &rxo->cq;
1851 if (q->created)
1852 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1853 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855}
1856
/* Decide the number of RX queues and create a completion queue for each,
 * binding CQs to EQs round-robin. Returns 0 on success or a negative error.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	/* big_page_size: allocation unit that is split into rx_frag_size
	 * receive fragments in be_post_rx_frags() */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs share EQs round-robin when there are fewer EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1890
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001891static irqreturn_t be_intx(int irq, void *dev)
1892{
1893 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001894 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001895
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001896 /* With INTx only one EQ is used */
1897 num_evts = event_handle(&adapter->eq_obj[0]);
1898 if (num_evts)
1899 return IRQ_HANDLED;
1900 else
1901 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902}
1903
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001904static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001906 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001908 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909 return IRQ_HANDLED;
1910}
1911
Sathya Perla2e588f82011-03-11 02:49:26 +00001912static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913{
Sathya Perla2e588f82011-03-11 02:49:26 +00001914 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915}
1916
/* NAPI RX poll worker for one RX object: consume up to 'budget' RX
 * completions, delivering packets via GRO or the regular path, then ack
 * the CQ (re-arming it) and replenish RX buffers if the ring runs low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring when it drops below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1966
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001967static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
1968 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001971 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001973 for (work_done = 0; work_done < budget; work_done++) {
1974 txcp = be_tx_compl_get(&txo->cq);
1975 if (!txcp)
1976 break;
1977 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00001978 AMAP_GET_BITS(struct amap_eth_tx_compl,
1979 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001980 }
1981
1982 if (work_done) {
1983 be_cq_notify(adapter, txo->cq.id, true, work_done);
1984 atomic_sub(num_wrbs, &txo->q.used);
1985
1986 /* As Tx wrbs have been freed up, wake up netdev queue
1987 * if it was stopped due to lack of tx wrbs. */
1988 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
1989 atomic_read(&txo->q.used) < txo->q.len / 2) {
1990 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00001991 }
Sathya Perla3c8def92011-06-12 20:01:58 +00001992
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001993 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1994 tx_stats(txo)->tx_compl += work_done;
1995 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1996 }
1997 return (work_done < budget); /* Done */
1998}
Sathya Perla3c8def92011-06-12 20:01:58 +00001999
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002000int be_poll(struct napi_struct *napi, int budget)
2001{
2002 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2003 struct be_adapter *adapter = eqo->adapter;
2004 int max_work = 0, work, i;
2005 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002006
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002007 /* Process all TXQs serviced by this EQ */
2008 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2009 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2010 eqo->tx_budget, i);
2011 if (!tx_done)
2012 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013 }
2014
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002015 /* This loop will iterate twice for EQ0 in which
2016 * completions of the last RXQ (default one) are also processed
2017 * For other EQs the loop iterates only once
2018 */
2019 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2020 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2021 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002022 }
2023
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002024 if (is_mcc_eqo(eqo))
2025 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002026
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002027 if (max_work < budget) {
2028 napi_complete(napi);
2029 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2030 } else {
2031 /* As we'll continue in polling mode, count and clear events */
2032 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002033 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002034 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035}
2036
/* Detect an unrecoverable error (UE) in the adapter and dump diagnostics.
 * On Lancer chips the SLIPORT status/error registers are read via MMIO;
 * on BE chips the UE status words come from PCI config space and are
 * filtered by their corresponding mask registers. On error this latches
 * ue_detected/eeh_err so detection and logging happen only once.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Error already flagged - nothing more to do */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* A UE bit counts only when it is not masked */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Walk the status words and name every error bit that is set */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2100
Sathya Perla8d56ff12009-11-22 22:02:26 +00002101static void be_msix_disable(struct be_adapter *adapter)
2102{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002103 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002104 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002105 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002106 }
2107}
2108
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002109static uint be_num_rss_want(struct be_adapter *adapter)
2110{
2111 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2112 adapter->num_vfs == 0 && be_physfn(adapter) &&
2113 !be_is_mc(adapter))
2114 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2115 else
2116 return 0;
2117}
2118
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002119static void be_msix_enable(struct be_adapter *adapter)
2120{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002121#define BE_MIN_MSIX_VECTORS 1
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002122 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002123
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002124 /* If RSS queues are not used, need a vec for default RX Q */
2125 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2126 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002127
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002128 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002129 adapter->msix_entries[i].entry = i;
2130
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002131 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002132 if (status == 0) {
2133 goto done;
2134 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002135 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002136 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002137 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002138 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002139 }
2140 return;
2141done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002142 adapter->num_msix_vec = num_vec;
2143 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002144}
2145
/* Enable SR-IOV if requested via the num_vfs module parameter (PF only).
 * The requested count is capped at what the device reports in its SR-IOV
 * capability. Allocates per-VF config for the VFs actually enabled.
 * Returns 0 on success (including SR-IOV not requested) or -ENOMEM.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		/* Cap the request at the device's advertised TotalVFs */
		pos = pci_find_ext_capability(adapter->pdev,
					PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* Allocate config for exactly the VFs enabled, not
			 * the module parameter (which may exceed what the
			 * device supports and would over-allocate).
			 */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2181
/* Disable SR-IOV and free the per-VF config. The vf_cfg pointer is
 * cleared after kfree to guard against accidental use-after-free on a
 * later enable/disable cycle.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->vf_cfg = NULL;
		adapter->num_vfs = 0;
	}
#endif
}
2192
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002193static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002194 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002195{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002196 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002197}
2198
2199static int be_msix_register(struct be_adapter *adapter)
2200{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002201 struct net_device *netdev = adapter->netdev;
2202 struct be_eq_obj *eqo;
2203 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002204
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002205 for_all_evt_queues(adapter, eqo, i) {
2206 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2207 vec = be_msix_vec_get(adapter, eqo);
2208 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002209 if (status)
2210 goto err_msix;
2211 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002212
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002213 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002214err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002215 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2216 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2217 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2218 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002219 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002220 return status;
2221}
2222
2223static int be_irq_register(struct be_adapter *adapter)
2224{
2225 struct net_device *netdev = adapter->netdev;
2226 int status;
2227
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002228 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002229 status = be_msix_register(adapter);
2230 if (status == 0)
2231 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002232 /* INTx is not supported for VF */
2233 if (!be_physfn(adapter))
2234 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002235 }
2236
2237 /* INTx */
2238 netdev->irq = adapter->pdev->irq;
2239 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2240 adapter);
2241 if (status) {
2242 dev_err(&adapter->pdev->dev,
2243 "INTx request IRQ failed - err %d\n", status);
2244 return status;
2245 }
2246done:
2247 adapter->isr_registered = true;
2248 return 0;
2249}
2250
2251static void be_irq_unregister(struct be_adapter *adapter)
2252{
2253 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002254 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002255 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256
2257 if (!adapter->isr_registered)
2258 return;
2259
2260 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002261 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002262 free_irq(netdev->irq, adapter);
2263 goto done;
2264 }
2265
2266 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002267 for_all_evt_queues(adapter, eqo, i)
2268 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002269
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002270done:
2271 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272}
2273
/* Destroy all RX queues: issue a destroy cmd for each created queue, wait
 * for in-flight DMA and the flush completion, drain the RX CQ, then free
 * the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			/* Reap and drop everything still on the RX CQ */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2294
/* ndo_stop: quiesce the adapter - stop async MCC events, mask interrupts,
 * disable NAPI and IRQs, then drain TX and destroy the RX queues. The
 * ordering here is deliberate: interrupt sources are silenced before the
 * queues that their handlers touch are torn down.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	/* Lancer does not use the global intr-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Make sure no handler is still running on this EQ's vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2325
/* Allocate and create all RX queues (default RXQ first, as the FW
 * requires), program the 128-entry RSS indirection table when multiple RX
 * queues are in use, and post the initial batch of RX buffers.
 * Returns 0 or a negative FW-cmd error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queue ids; the default RXQ is not part of the RSS set,
		 * hence the stride of num_rx_qs - 1
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2372
/* ndo_open: create the RX queues, register and unmask interrupts, arm all
 * completion/event queues, enable NAPI, and report the current link state
 * to the stack. On any failure the partially-opened device is closed.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use the global intr-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Query and publish the initial link state */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2414
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002415static int be_setup_wol(struct be_adapter *adapter, bool enable)
2416{
2417 struct be_dma_mem cmd;
2418 int status = 0;
2419 u8 mac[ETH_ALEN];
2420
2421 memset(mac, 0, ETH_ALEN);
2422
2423 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002424 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2425 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002426 if (cmd.va == NULL)
2427 return -1;
2428 memset(cmd.va, 0, cmd.size);
2429
2430 if (enable) {
2431 status = pci_write_config_dword(adapter->pdev,
2432 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2433 if (status) {
2434 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002435 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002436 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2437 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002438 return status;
2439 }
2440 status = be_cmd_enable_magic_wol(adapter,
2441 adapter->netdev->dev_addr, &cmd);
2442 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2443 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2444 } else {
2445 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2446 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2447 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2448 }
2449
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002450 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002451 return status;
2452}
2453
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the mac-list cmd; BE adds a
		 * pmac entry on the VF's interface handle
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* NOTE(review): status is overwritten on every iteration, so
		 * the return value reflects only the last VF; earlier
		 * failures are logged but otherwise masked - confirm intended.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF takes the next consecutive MAC */
		mac[5] += 1;
	}
	return status;
}
2488
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002489static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002490{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002491 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002492 u32 vf;
2493
Sathya Perla11ac75e2011-12-13 00:58:50 +00002494 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002495 if (lancer_chip(adapter))
2496 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2497 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002498 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2499 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002500
Sathya Perla11ac75e2011-12-13 00:58:50 +00002501 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2502 }
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002503}
2504
/* Undo be_setup(): stop the worker, release VF resources, delete extra
 * unicast MACs, destroy the interface and all queue sets, tell the FW we
 * are done, and release MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* Extra unicast MACs start at pmac_id[1]; presumably pmac_id[0] is
	 * the primary MAC - see be_add_mac_from_list()
	 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete each additional unicast MAC programmed on the interface */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}
2535
Sathya Perla30128032011-11-10 19:17:57 +00002536static void be_vf_setup_init(struct be_adapter *adapter)
2537{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002538 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002539 int vf;
2540
Sathya Perla11ac75e2011-12-13 00:58:50 +00002541 for_all_vfs(adapter, vf_cfg, vf) {
2542 vf_cfg->if_handle = -1;
2543 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002544 }
2545}
2546
/* Provision all VFs: create an interface per VF, program its MAC, then
 * cache its link speed (used for tx-rate accounting) and default VLAN
 * queried from the FW. Returns 0 or the first FW-cmd error encountered.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		/* lnk_speed presumably in 10 Mbps units - TODO confirm */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2586
Sathya Perla30128032011-11-10 19:17:57 +00002587static void be_setup_init(struct be_adapter *adapter)
2588{
2589 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002590 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002591 adapter->if_handle = -1;
2592 adapter->be3_native = false;
2593 adapter->promiscuous = false;
2594 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002595 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002596}
2597
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002598static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002599{
2600 u32 pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002601 int status;
2602 bool pmac_id_active;
2603
2604 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2605 &pmac_id, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002606 if (status != 0)
2607 goto do_none;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002608
2609 if (pmac_id_active) {
2610 status = be_cmd_mac_addr_query(adapter, mac,
2611 MAC_ADDRESS_TYPE_NETWORK,
2612 false, adapter->if_handle, pmac_id);
2613
2614 if (!status)
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002615 adapter->pmac_id[0] = pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002616 } else {
2617 status = be_cmd_pmac_add(adapter, mac,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002618 adapter->if_handle, &adapter->pmac_id[0], 0);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002619 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002620do_none:
2621 return status;
2622}
2623
Sathya Perla5fb379e2009-06-18 00:02:59 +00002624static int be_setup(struct be_adapter *adapter)
2625{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002626 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002627 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002628 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002629 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002630 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002631
Sathya Perla30128032011-11-10 19:17:57 +00002632 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002633
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002634 be_cmd_req_native_mode(adapter);
2635
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002636 be_msix_enable(adapter);
2637
2638 status = be_evt_queues_create(adapter);
2639 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002640 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002641
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002642 status = be_tx_cqs_create(adapter);
2643 if (status)
2644 goto err;
2645
2646 status = be_rx_cqs_create(adapter);
2647 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002648 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002649
Sathya Perla5fb379e2009-06-18 00:02:59 +00002650 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002651 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002652 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002653
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002654 memset(mac, 0, ETH_ALEN);
2655 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002656 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002657 if (status)
2658 return status;
2659 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2660 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2661
2662 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2663 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2664 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002665 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2666
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002667 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2668 cap_flags |= BE_IF_FLAGS_RSS;
2669 en_flags |= BE_IF_FLAGS_RSS;
2670 }
2671 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2672 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002673 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002674 if (status != 0)
2675 goto err;
2676
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002677 /* The VF's permanent mac queried from card is incorrect.
2678 * For BEx: Query the mac configued by the PF using if_handle
2679 * For Lancer: Get and use mac_list to obtain mac address.
2680 */
2681 if (!be_physfn(adapter)) {
2682 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002683 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002684 else
2685 status = be_cmd_mac_addr_query(adapter, mac,
2686 MAC_ADDRESS_TYPE_NETWORK, false,
2687 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002688 if (!status) {
2689 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2690 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2691 }
2692 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002693
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002694 status = be_tx_qs_create(adapter);
2695 if (status)
2696 goto err;
2697
Sathya Perla04b71172011-09-27 13:30:27 -04002698 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002699
Sathya Perlaa54769f2011-10-24 02:45:00 +00002700 status = be_vid_config(adapter, false, 0);
2701 if (status)
2702 goto err;
2703
2704 be_set_rx_mode(adapter->netdev);
2705
2706 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002707 /* For Lancer: It is legal for this cmd to fail on VF */
2708 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002709 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002710
Sathya Perlaa54769f2011-10-24 02:45:00 +00002711 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2712 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2713 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002714 /* For Lancer: It is legal for this cmd to fail on VF */
2715 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002716 goto err;
2717 }
2718
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002719 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002720
Sathya Perla11ac75e2011-12-13 00:58:50 +00002721 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002722 status = be_vf_setup(adapter);
2723 if (status)
2724 goto err;
2725 }
2726
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002727 be_cmd_get_phy_info(adapter);
2728 if (be_pause_supported(adapter))
2729 adapter->phy.fc_autoneg = 1;
2730
Sathya Perla191eb752012-02-23 18:50:13 +00002731 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2732 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2733
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002734 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002735err:
2736 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002737 return status;
2738}
2739
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: service every event queue so netconsole/kgdb can
 * make progress with normal interrupt delivery disabled.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
2753
Ajit Khaparde84517482009-09-04 03:12:16 +00002754#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002755static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002756 const u8 *p, u32 img_start, int image_size,
2757 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002758{
2759 u32 crc_offset;
2760 u8 flashed_crc[4];
2761 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002762
2763 crc_offset = hdr_size + img_start + image_size - 4;
2764
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002765 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002766
2767 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002768 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002769 if (status) {
2770 dev_err(&adapter->pdev->dev,
2771 "could not get crc from flash, not flashing redboot\n");
2772 return false;
2773 }
2774
2775 /*update redboot only if crc does not match*/
2776 if (!memcmp(flashed_crc, p, 4))
2777 return false;
2778 else
2779 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002780}
2781
Sathya Perla306f1342011-08-02 19:57:45 +00002782static bool phy_flashing_required(struct be_adapter *adapter)
2783{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002784 return (adapter->phy.phy_type == TN_8022 &&
2785 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002786}
2787
/* Flash the individual firmware components contained in a UFI file.
 * Walks the per-generation component table (gen2 or gen3), skips components
 * that do not apply (old NCSI firmware, PHY firmware when no flashable PHY
 * is present, redboot when the on-flash CRC already matches), and writes
 * each remaining component to flash in 32KB chunks through the DMA-mapped
 * write_flashrom command buffer.
 *
 * @adapter:       driver context
 * @fw:            the complete UFI firmware file
 * @flash_cmd:     pre-allocated DMA buffer used for the flashrom commands
 * @num_of_images: number of image_hdr entries following the file header
 *                 (0 for gen2 files)
 *
 * Returns 0 on success, -1 on a malformed file or a failed flash write.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Component layout tables: { flash offset, image type, max size } */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI firmware requires controller FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY firmware only applies when a flashable PHY is fitted */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Skip redboot when the on-flash CRC already matches */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* Locate this component's data within the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;	/* component overruns the file */
		total_bytes = pflashcomp[i].size;
		/* Write in 32KB chunks; the final chunk uses the FLASH
		 * (commit) opcode, earlier chunks use SAVE.
		 */
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW rejecting PHY flash is not fatal:
				 * skip the component and carry on.
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2904
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002905static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2906{
2907 if (fhdr == NULL)
2908 return 0;
2909 if (fhdr->build[0] == '3')
2910 return BE_GEN3;
2911 else if (fhdr->build[0] == '2')
2912 return BE_GEN2;
2913 else
2914 return 0;
2915}
2916
/* Download a firmware image to a Lancer chip.
 * The image is streamed to the "/prg" flash object in 32KB chunks through a
 * single DMA-coherent command buffer, then committed with a zero-length
 * write. The image length must be a multiple of 4 bytes.
 * Returns 0 on success or a negative errno / firmware status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* Firmware requires the image to be 4-byte aligned in length */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer holds the write_object request plus one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Chunk data is placed immediately after the request header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by the amount FW reports as written,
	 * which may be less than the chunk submitted.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write finalizes it */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2995
/* Flash a UFI firmware file on a BE2/BE3 adapter.
 * Validates that the file's generation (from its header) matches the
 * adapter's generation, then flashes each image section via be_flash_data().
 * Gen3 files carry multiple image headers; only imageid == 1 is flashed.
 * Returns 0 on success, -ENOMEM / -1 / firmware status on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* The gen2 header prefix is common to both formats; enough to read
	 * the build byte used by get_ufigen_type().
	 */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* Command buffer: request header plus one 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* Only imageid 1 is flashable on this interface */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3051
3052int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3053{
3054 const struct firmware *fw;
3055 int status;
3056
3057 if (!netif_running(adapter->netdev)) {
3058 dev_err(&adapter->pdev->dev,
3059 "Firmware load not allowed (interface is down)\n");
3060 return -1;
3061 }
3062
3063 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3064 if (status)
3065 goto fw_exit;
3066
3067 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3068
3069 if (lancer_chip(adapter))
3070 status = lancer_fw_download(adapter, fw);
3071 else
3072 status = be_fw_download(adapter, fw);
3073
Ajit Khaparde84517482009-09-04 03:12:16 +00003074fw_exit:
3075 release_firmware(fw);
3076 return status;
3077}
3078
/* net_device callbacks for the be2net driver. The SR-IOV ndo_set_vf_*
 * entries are only meaningful on the PF; be_netpoll is compiled in only
 * with CONFIG_NET_POLL_CONTROLLER.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3098
/* Initialize netdev feature flags, ops, ethtool ops and NAPI contexts.
 * Called once at probe time, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: checksum, TSO, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing only makes sense with multiple RX queues (RSS) */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above enabled by default, plus VLAN rx strip/filter
	 * which are always-on (not user-toggleable).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promisc fallback */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3130
3131static void be_unmap_pci_bars(struct be_adapter *adapter)
3132{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003133 if (adapter->csr)
3134 iounmap(adapter->csr);
3135 if (adapter->db)
3136 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003137}
3138
3139static int be_map_pci_bars(struct be_adapter *adapter)
3140{
3141 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003142 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003143
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003144 if (lancer_chip(adapter)) {
3145 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3146 pci_resource_len(adapter->pdev, 0));
3147 if (addr == NULL)
3148 return -ENOMEM;
3149 adapter->db = addr;
3150 return 0;
3151 }
3152
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003153 if (be_physfn(adapter)) {
3154 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3155 pci_resource_len(adapter->pdev, 2));
3156 if (addr == NULL)
3157 return -ENOMEM;
3158 adapter->csr = addr;
3159 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003160
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003161 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003162 db_reg = 4;
3163 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003164 if (be_physfn(adapter))
3165 db_reg = 4;
3166 else
3167 db_reg = 0;
3168 }
3169 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3170 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003171 if (addr == NULL)
3172 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003173 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003174
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003175 return 0;
3176pci_map_err:
3177 be_unmap_pci_bars(adapter);
3178 return -ENOMEM;
3179}
3180
3181
3182static void be_ctrl_cleanup(struct be_adapter *adapter)
3183{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003184 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003185
3186 be_unmap_pci_bars(adapter);
3187
3188 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003189 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3190 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003191
Sathya Perla5b8821b2011-08-02 19:57:44 +00003192 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003193 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003194 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3195 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003196}
3197
/* Initialize control-path resources: map PCI BARs, allocate the
 * 16-byte-aligned mailbox DMA buffer and the rx_filter command buffer,
 * and initialize the mailbox/MCC locks. Cleans up partially-acquired
 * resources on failure via the goto ladder.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be aligned to 16 bytes
	 * (a hardware requirement) within the allocation.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Save config space for restore after an EEH/PCI error reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3250
3251static void be_stats_cleanup(struct be_adapter *adapter)
3252{
Sathya Perla3abcded2010-10-03 22:12:27 -07003253 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003254
3255 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003256 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3257 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003258}
3259
3260static int be_stats_init(struct be_adapter *adapter)
3261{
Sathya Perla3abcded2010-10-03 22:12:27 -07003262 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003263
Selvin Xavier005d5692011-05-16 07:36:35 +00003264 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003265 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003266 } else {
3267 if (lancer_chip(adapter))
3268 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3269 else
3270 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3271 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003272 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3273 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003274 if (cmd->va == NULL)
3275 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003276 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003277 return 0;
3278}
3279
/* PCI remove callback: tear down in strict reverse order of probe.
 * The netdev is unregistered first so no new I/O can start, then queues
 * and firmware objects are destroyed, stats and control-path DMA freed,
 * SR-IOV disabled, and finally the PCI device released.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the adapter itself (it is the netdev's private area) */
	free_netdev(adapter->netdev);
}
3303
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003304bool be_is_wol_supported(struct be_adapter *adapter)
3305{
3306 return ((adapter->wol_cap & BE_WOL_CAP) &&
3307 !be_is_wol_excluded(adapter)) ? true : false;
3308}
3309
/* Query static firmware configuration at probe time: port number,
 * function mode/caps, VLAN and pmac limits, controller attributes and
 * Wake-on-LAN capability.
 * Returns 0 on success or a negative errno / firmware status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* In Flex10 mode the VLAN space is shared among partitions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;
	/* NOTE(review): pmac_id is not freed on the failure paths below —
	 * presumably released by the caller's error handling; verify.
	 */

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	return 0;
}
3352
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003353static int be_dev_family_check(struct be_adapter *adapter)
3354{
3355 struct pci_dev *pdev = adapter->pdev;
3356 u32 sli_intf = 0, if_type;
3357
3358 switch (pdev->device) {
3359 case BE_DEVICE_ID1:
3360 case OC_DEVICE_ID1:
3361 adapter->generation = BE_GEN2;
3362 break;
3363 case BE_DEVICE_ID2:
3364 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003365 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003366 adapter->generation = BE_GEN3;
3367 break;
3368 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003369 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003370 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3371 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3372 SLI_INTF_IF_TYPE_SHIFT;
3373
3374 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3375 if_type != 0x02) {
3376 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3377 return -EINVAL;
3378 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003379 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3380 SLI_INTF_FAMILY_SHIFT);
3381 adapter->generation = BE_GEN3;
3382 break;
3383 default:
3384 adapter->generation = 0;
3385 }
3386 return 0;
3387}
3388
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003389static int lancer_wait_ready(struct be_adapter *adapter)
3390{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003391#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003392 u32 sliport_status;
3393 int status = 0, i;
3394
3395 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3396 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3397 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3398 break;
3399
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003400 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003401 }
3402
3403 if (i == SLIPORT_READY_TIMEOUT)
3404 status = -1;
3405
3406 return status;
3407}
3408
3409static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3410{
3411 int status;
3412 u32 sliport_status, err, reset_needed;
3413 status = lancer_wait_ready(adapter);
3414 if (!status) {
3415 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3416 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3417 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3418 if (err && reset_needed) {
3419 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3420 adapter->db + SLIPORT_CONTROL_OFFSET);
3421
3422 /* check adapter has corrected the error */
3423 status = lancer_wait_ready(adapter);
3424 sliport_status = ioread32(adapter->db +
3425 SLIPORT_STATUS_OFFSET);
3426 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3427 SLIPORT_STATUS_RN_MASK);
3428 if (status || sliport_status)
3429 status = -1;
3430 } else if (err || reset_needed) {
3431 status = -1;
3432 }
3433 }
3434 return status;
3435}
3436
/* Lancer error recovery, invoked from the periodic worker: if the
 * SLIPORT status shows an error, attempt a full function recovery —
 * reset the port, tear down and rebuild the adapter state, and reopen
 * the interface if it was running.
 *
 * Skipped while an EEH error or unrecoverable error (UE) is in progress.
 * NOTE(review): be_close()/be_open() are called here without rtnl_lock,
 * unlike the suspend/resume paths — confirm this is safe from worker
 * context.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear any stale fw-timeout condition before re-setup */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3485
/* Periodic housekeeping (self-rescheduling delayed work, ~1s period):
 * Lancer error recovery, UE detection, statistics refresh, replenishing
 * RX queues that ran out of buffers, and adaptive EQ-delay tuning.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* issue a new stats request only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost buffers to any RX queue that was starved of frags */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3528
/* PCI probe: bring up one BE/Lancer function.
 *
 * Order matters throughout: PCI enable/regions -> netdev alloc ->
 * family check -> DMA mask -> SR-IOV -> control structures ->
 * Lancer readiness / POST -> fw_init -> function reset -> stats ->
 * config query -> be_setup -> register_netdev.  Failures unwind in
 * exact reverse order via the goto ladder at the bottom.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* must run before anything that depends on adapter->generation */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA, fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: wait for port ready; if not, reset the port and retry */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	/* default both directions of flow control to on */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3661
/* PCI suspend: arm Wake-on-LAN if enabled, detach and close the netdev
 * (under rtnl), tear down adapter resources, then power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* program WoL before quiescing so the fw can wake the host */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3683
3684static int be_resume(struct pci_dev *pdev)
3685{
3686 int status = 0;
3687 struct be_adapter *adapter = pci_get_drvdata(pdev);
3688 struct net_device *netdev = adapter->netdev;
3689
3690 netif_device_detach(netdev);
3691
3692 status = pci_enable_device(pdev);
3693 if (status)
3694 return status;
3695
3696 pci_set_power_state(pdev, 0);
3697 pci_restore_state(pdev);
3698
Sathya Perla2243e2e2009-11-22 22:02:03 +00003699 /* tell fw we're ready to fire cmds */
3700 status = be_cmd_fw_init(adapter);
3701 if (status)
3702 return status;
3703
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003704 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003705 if (netif_running(netdev)) {
3706 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003707 be_open(netdev);
3708 rtnl_unlock();
3709 }
3710 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003711
3712 if (adapter->wol)
3713 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003714
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003715 return 0;
3716}
3717
/*
 * An FLR will stop BE from DMAing any data.
 */
/* System shutdown hook: stop the worker, detach the netdev, arm WoL if
 * requested, reset the function (halting DMA), and disable the device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* shutdown can race with a failed/aborted probe */
	if (!adapter)
		return;

	/* ensure be_worker is not running or rescheduling during teardown */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3739
/* EEH/AER callback: a PCI channel error was detected.  Flag the error,
 * quiesce the interface and free adapter resources, then tell the EEH
 * core whether a slot reset should be attempted.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* suppress further hw access (e.g. from the worker) during recovery */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3766
/* EEH/AER slot-reset callback: re-enable the device after the slot was
 * reset, clear the error flags, and verify the card via POST.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success so be_eeh_resume() runs,
 * PCI_ERS_RESULT_DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* reset all error/timeout state tracked from before the reset */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3792
/* EEH/AER resume callback: after a successful slot reset, rebuild the
 * adapter (fw_init + be_setup), reopen the interface if it was running,
 * and re-attach the netdev.  On any failure the device stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3822
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3828
/* PCI driver descriptor tying device ids to the probe/remove,
 * power-management, shutdown and error-recovery entry points above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3839
3840static int __init be_init_module(void)
3841{
Joe Perches8e95a202009-12-03 07:58:21 +00003842 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3843 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003844 printk(KERN_WARNING DRV_NAME
3845 " : Module param rx_frag_size must be 2048/4096/8192."
3846 " Using 2048\n");
3847 rx_frag_size = 2048;
3848 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003849
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003850 return pci_register_driver(&be_driver);
3851}
3852module_init(be_init_module);
3853
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);