blob: fe702c1c866d5508215968f23632087fc83b51ce [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000049/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070050static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000051 "CEV",
52 "CTX",
53 "DBUF",
54 "ERX",
55 "Host",
56 "MPU",
57 "NDMA",
58 "PTC ",
59 "RDMA ",
60 "RXF ",
61 "RXIPS ",
62 "RXULP0 ",
63 "RXULP1 ",
64 "RXULP2 ",
65 "TIM ",
66 "TPOST ",
67 "TPRE ",
68 "TXIPS ",
69 "TXULP0 ",
70 "TXULP1 ",
71 "UC ",
72 "WDMA ",
73 "TXULP2 ",
74 "HOST1 ",
75 "P0_OB_LINK ",
76 "P1_OB_LINK ",
77 "HOST_GPIO ",
78 "MBOX ",
79 "AXGMAC0",
80 "AXGMAC1",
81 "JTAG",
82 "MPU_INTPEND"
83};
84/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070085static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000086 "LPCMEMHOST",
87 "MGMT_MAC",
88 "PCS0ONLINE",
89 "MPU_IRAM",
90 "PCS1ONLINE",
91 "PCTL0",
92 "PCTL1",
93 "PMEM",
94 "RR",
95 "TXPB",
96 "RXPP",
97 "XAUI",
98 "TXP",
99 "ARM",
100 "IPC",
101 "HOST2",
102 "HOST3",
103 "HOST4",
104 "HOST5",
105 "HOST6",
106 "HOST7",
107 "HOST8",
108 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700109 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown"
118};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
147 return -1;
148 memset(mem->va, 0, mem->size);
149 return 0;
150}
151
Sathya Perla8788fdc2009-07-27 22:52:03 +0000152static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perlacf588472010-02-14 21:22:01 +0000156 if (adapter->eeh_err)
157 return;
158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_RQ_RING_ID_MASK;
178 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185{
186 u32 val = 0;
187 val |= qid & DB_TXULP_RING_ID_MASK;
188 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000189
190 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192}
193
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195 bool arm, bool clear_int, u16 num_popped)
196{
197 u32 val = 0;
198 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000199 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
200 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000201
202 if (adapter->eeh_err)
203 return;
204
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 if (arm)
206 val |= 1 << DB_EQ_REARM_SHIFT;
207 if (clear_int)
208 val |= 1 << DB_EQ_CLR_SHIFT;
209 val |= 1 << DB_EQ_EVNT_SHIFT;
210 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212}
213
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215{
216 u32 val = 0;
217 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000218 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
219 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000220
221 if (adapter->eeh_err)
222 return;
223
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700224 if (arm)
225 val |= 1 << DB_CQ_REARM_SHIFT;
226 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228}
229
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230static int be_mac_addr_set(struct net_device *netdev, void *p)
231{
232 struct be_adapter *adapter = netdev_priv(netdev);
233 struct sockaddr *addr = p;
234 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000235 u8 current_mac[ETH_ALEN];
236 u32 pmac_id = adapter->pmac_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000238 if (!is_valid_ether_addr(addr->sa_data))
239 return -EADDRNOTAVAIL;
240
Somnath Koture3a7ae22011-10-27 07:14:05 +0000241 status = be_cmd_mac_addr_query(adapter, current_mac,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000242 MAC_ADDRESS_TYPE_NETWORK, false,
243 adapter->if_handle, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000244 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000245 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246
Somnath Koture3a7ae22011-10-27 07:14:05 +0000247 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
248 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000249 adapter->if_handle, &adapter->pmac_id, 0);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000250 if (status)
251 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252
Somnath Koture3a7ae22011-10-27 07:14:05 +0000253 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
254 }
255 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
256 return 0;
257err:
258 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700259 return status;
260}
261
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000262static void populate_be2_stats(struct be_adapter *adapter)
263{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000264 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
265 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
266 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000267 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000268 &rxf_stats->port[adapter->port_num];
269 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000270
Sathya Perlaac124ff2011-07-25 19:10:14 +0000271 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272 drvs->rx_pause_frames = port_stats->rx_pause_frames;
273 drvs->rx_crc_errors = port_stats->rx_crc_errors;
274 drvs->rx_control_frames = port_stats->rx_control_frames;
275 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
276 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
277 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
278 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
279 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
280 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
281 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
282 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
283 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
284 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
285 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000286 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000287 drvs->rx_dropped_header_too_small =
288 port_stats->rx_dropped_header_too_small;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000289 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000290 drvs->rx_alignment_symbol_errors =
291 port_stats->rx_alignment_symbol_errors;
292
293 drvs->tx_pauseframes = port_stats->tx_pauseframes;
294 drvs->tx_controlframes = port_stats->tx_controlframes;
295
296 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000297 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000298 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000299 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000300 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
301 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
302 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
303 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
304 drvs->forwarded_packets = rxf_stats->forwarded_packets;
305 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000306 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
307 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000308 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
309}
310
311static void populate_be3_stats(struct be_adapter *adapter)
312{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000313 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
314 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
315 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000316 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000317 &rxf_stats->port[adapter->port_num];
318 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000319
Sathya Perlaac124ff2011-07-25 19:10:14 +0000320 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000321 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
322 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000323 drvs->rx_pause_frames = port_stats->rx_pause_frames;
324 drvs->rx_crc_errors = port_stats->rx_crc_errors;
325 drvs->rx_control_frames = port_stats->rx_control_frames;
326 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
327 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
328 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
329 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
330 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
331 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
332 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
333 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
334 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
335 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
336 drvs->rx_dropped_header_too_small =
337 port_stats->rx_dropped_header_too_small;
338 drvs->rx_input_fifo_overflow_drop =
339 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000340 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341 drvs->rx_alignment_symbol_errors =
342 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000343 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000344 drvs->tx_pauseframes = port_stats->tx_pauseframes;
345 drvs->tx_controlframes = port_stats->tx_controlframes;
346 drvs->jabber_events = port_stats->jabber_events;
347 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
348 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
349 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
350 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
Selvin Xavier005d5692011-05-16 07:36:35 +0000358static void populate_lancer_stats(struct be_adapter *adapter)
359{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360
Selvin Xavier005d5692011-05-16 07:36:35 +0000361 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
364
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000385 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
387 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000388 drvs->jabber_events = pport_stats->rx_jabbers;
Selvin Xavier005d5692011-05-16 07:36:35 +0000389 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000390 drvs->forwarded_packets = pport_stats->num_forwards_lo;
391 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000392 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000393 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000394}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395
Sathya Perla09c1c682011-08-22 19:41:53 +0000396static void accumulate_16bit_val(u32 *acc, u16 val)
397{
398#define lo(x) (x & 0xFFFF)
399#define hi(x) (x & 0xFFFF0000)
400 bool wrapped = val < lo(*acc);
401 u32 newacc = hi(*acc) + val;
402
403 if (wrapped)
404 newacc += 65536;
405 ACCESS_ONCE(*acc) = newacc;
406}
407
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408void be_parse_stats(struct be_adapter *adapter)
409{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
411 struct be_rx_obj *rxo;
412 int i;
413
Selvin Xavier005d5692011-05-16 07:36:35 +0000414 if (adapter->generation == BE_GEN3) {
415 if (lancer_chip(adapter))
416 populate_lancer_stats(adapter);
417 else
418 populate_be3_stats(adapter);
419 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000421 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000422
423 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000424 for_all_rx_queues(adapter, rxo, i) {
425 /* below erx HW counter can actually wrap around after
426 * 65535. Driver accumulates a 32-bit value
427 */
428 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
429 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
430 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431}
432
Sathya Perlaab1594e2011-07-25 19:10:15 +0000433static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
434 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700435{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000436 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700438 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000439 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000440 u64 pkts, bytes;
441 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700442 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700443
Sathya Perla3abcded2010-10-03 22:12:27 -0700444 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000445 const struct be_rx_stats *rx_stats = rx_stats(rxo);
446 do {
447 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
448 pkts = rx_stats(rxo)->rx_pkts;
449 bytes = rx_stats(rxo)->rx_bytes;
450 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
451 stats->rx_packets += pkts;
452 stats->rx_bytes += bytes;
453 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
454 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
455 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700456 }
457
Sathya Perla3c8def92011-06-12 20:01:58 +0000458 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000459 const struct be_tx_stats *tx_stats = tx_stats(txo);
460 do {
461 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
462 pkts = tx_stats(txo)->tx_pkts;
463 bytes = tx_stats(txo)->tx_bytes;
464 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
465 stats->tx_packets += pkts;
466 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000467 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700468
469 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000470 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000471 drvs->rx_alignment_symbol_errors +
472 drvs->rx_in_range_errors +
473 drvs->rx_out_range_errors +
474 drvs->rx_frame_too_long +
475 drvs->rx_dropped_too_small +
476 drvs->rx_dropped_too_short +
477 drvs->rx_dropped_header_too_small +
478 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000479 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700480
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700481 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000482 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000483 drvs->rx_out_range_errors +
484 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000485
Sathya Perlaab1594e2011-07-25 19:10:15 +0000486 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700487
488 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000489 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000490
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700491 /* receiver fifo overrun */
492 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000493 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000494 drvs->rx_input_fifo_overflow_drop +
495 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000496 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497}
498
Sathya Perlaea172a02011-08-02 19:57:42 +0000499void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501 struct net_device *netdev = adapter->netdev;
502
Sathya Perlaea172a02011-08-02 19:57:42 +0000503 /* when link status changes, link speed must be re-queried from card */
504 adapter->link_speed = -1;
505 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
506 netif_carrier_on(netdev);
507 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
508 } else {
509 netif_carrier_off(netdev);
510 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512}
513
Sathya Perla3c8def92011-06-12 20:01:58 +0000514static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000515 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700516{
Sathya Perla3c8def92011-06-12 20:01:58 +0000517 struct be_tx_stats *stats = tx_stats(txo);
518
Sathya Perlaab1594e2011-07-25 19:10:15 +0000519 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 stats->tx_reqs++;
521 stats->tx_wrbs += wrb_cnt;
522 stats->tx_bytes += copied;
523 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700524 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000526 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700527}
528
529/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000530static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
531 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700532{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700533 int cnt = (skb->len > skb->data_len);
534
535 cnt += skb_shinfo(skb)->nr_frags;
536
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700537 /* to account for hdr wrb */
538 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000539 if (lancer_chip(adapter) || !(cnt & 1)) {
540 *dummy = false;
541 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700542 /* add a dummy to make it an even num */
543 cnt++;
544 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700546 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
547 return cnt;
548}
549
550static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
551{
552 wrb->frag_pa_hi = upper_32_bits(addr);
553 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
554 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
555}
556
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000557static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
558 struct sk_buff *skb)
559{
560 u8 vlan_prio;
561 u16 vlan_tag;
562
563 vlan_tag = vlan_tx_tag_get(skb);
564 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
565 /* If vlan priority provided by OS is NOT in available bmap */
566 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
567 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
568 adapter->recommended_prio;
569
570 return vlan_tag;
571}
572
Somnath Koturcc4ce022010-10-21 07:11:14 -0700573static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
574 struct sk_buff *skb, u32 wrb_cnt, u32 len)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000576 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700577
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700578 memset(hdr, 0, sizeof(*hdr));
579
580 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
581
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000582 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
584 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
585 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000586 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000588 if (lancer_chip(adapter) && adapter->sli_family ==
589 LANCER_A0_SLI_FAMILY) {
590 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
591 if (is_tcp_pkt(skb))
592 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
593 tcpcs, hdr, 1);
594 else if (is_udp_pkt(skb))
595 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
596 udpcs, hdr, 1);
597 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700598 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
599 if (is_tcp_pkt(skb))
600 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
601 else if (is_udp_pkt(skb))
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
603 }
604
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700605 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700606 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000607 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700608 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700609 }
610
611 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
613 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
614 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
615}
616
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000617static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000618 bool unmap_single)
619{
620 dma_addr_t dma;
621
622 be_dws_le_to_cpu(wrb, sizeof(*wrb));
623
624 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000625 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000626 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000627 dma_unmap_single(dev, dma, wrb->frag_len,
628 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000629 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000630 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000631 }
632}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700633
Sathya Perla3c8def92011-06-12 20:01:58 +0000634static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700635 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
636{
Sathya Perla7101e112010-03-22 20:41:12 +0000637 dma_addr_t busaddr;
638 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000639 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700640 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641 struct be_eth_wrb *wrb;
642 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000643 bool map_single = false;
644 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 hdr = queue_head_node(txq);
647 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000648 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700649
David S. Millerebc8d2a2009-06-09 01:01:31 -0700650 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700651 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000652 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
653 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000654 goto dma_err;
655 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700656 wrb = queue_head_node(txq);
657 wrb_fill(wrb, busaddr, len);
658 be_dws_cpu_to_le(wrb, sizeof(*wrb));
659 queue_head_inc(txq);
660 copied += len;
661 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662
David S. Millerebc8d2a2009-06-09 01:01:31 -0700663 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000664 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700665 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000666 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000667 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000668 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000669 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700670 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000671 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700672 be_dws_cpu_to_le(wrb, sizeof(*wrb));
673 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000674 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700675 }
676
677 if (dummy_wrb) {
678 wrb = queue_head_node(txq);
679 wrb_fill(wrb, 0, 0);
680 be_dws_cpu_to_le(wrb, sizeof(*wrb));
681 queue_head_inc(txq);
682 }
683
Somnath Koturcc4ce022010-10-21 07:11:14 -0700684 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685 be_dws_cpu_to_le(hdr, sizeof(*hdr));
686
687 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000688dma_err:
689 txq->head = map_head;
690 while (copied) {
691 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000692 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000693 map_single = false;
694 copied -= wrb->frag_len;
695 queue_head_inc(txq);
696 }
697 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698}
699
Stephen Hemminger613573252009-08-31 19:50:58 +0000700static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700701 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702{
703 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000704 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
705 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706 u32 wrb_cnt = 0, copied = 0;
707 u32 start = txq->head;
708 bool dummy_wrb, stopped = false;
709
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710 /* For vlan tagged pkts, BE
711 * 1) calculates checksum even when CSO is not requested
712 * 2) calculates checksum wrongly for padded pkt less than
713 * 60 bytes long.
714 * As a workaround disable TX vlan offloading in such cases.
715 */
716 if (unlikely(vlan_tx_tag_present(skb) &&
717 (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
718 skb = skb_share_check(skb, GFP_ATOMIC);
719 if (unlikely(!skb))
720 goto tx_drop;
721
722 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
723 if (unlikely(!skb))
724 goto tx_drop;
725
726 skb->vlan_tci = 0;
727 }
728
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000729 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700730
Sathya Perla3c8def92011-06-12 20:01:58 +0000731 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000732 if (copied) {
733 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000734 BUG_ON(txo->sent_skb_list[start]);
735 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000737 /* Ensure txq has space for the next skb; Else stop the queue
738 * *BEFORE* ringing the tx doorbell, so that we serialze the
739 * tx compls of the current transmit which'll wake up the queue
740 */
Sathya Perla7101e112010-03-22 20:41:12 +0000741 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000742 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
743 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000744 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000745 stopped = true;
746 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700747
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000748 be_txq_notify(adapter, txq->id, wrb_cnt);
749
Sathya Perla3c8def92011-06-12 20:01:58 +0000750 be_tx_stats_update(txo, wrb_cnt, copied,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000751 skb_shinfo(skb)->gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000752 } else {
753 txq->head = start;
754 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700755 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000756tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700757 return NETDEV_TX_OK;
758}
759
760static int be_change_mtu(struct net_device *netdev, int new_mtu)
761{
762 struct be_adapter *adapter = netdev_priv(netdev);
763 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000764 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
765 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700766 dev_info(&adapter->pdev->dev,
767 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000768 BE_MIN_MTU,
769 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770 return -EINVAL;
771 }
772 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
773 netdev->mtu, new_mtu);
774 netdev->mtu = new_mtu;
775 return 0;
776}
777
778/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000779 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
780 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000782static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700783{
Sathya Perla11ac75e2011-12-13 00:58:50 +0000784 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700785 u16 vtag[BE_NUM_VLANS_SUPPORTED];
786 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000787 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000788
789 if (vf) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000790 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
791 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
792 1, 1, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000793 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700794
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000795 /* No need to further configure vids if in promiscuous mode */
796 if (adapter->promiscuous)
797 return 0;
798
Ajit Khaparde82903e42010-02-09 01:34:57 +0000799 if (adapter->vlans_added <= adapter->max_vlans) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700800 /* Construct VLAN Table to give to HW */
Jesse Grossb7381272010-10-20 13:56:02 +0000801 for (i = 0; i < VLAN_N_VID; i++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 if (adapter->vlan_tag[i]) {
803 vtag[ntags] = cpu_to_le16(i);
804 ntags++;
805 }
806 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700807 status = be_cmd_vlan_config(adapter, adapter->if_handle,
808 vtag, ntags, 1, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700809 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700810 status = be_cmd_vlan_config(adapter, adapter->if_handle,
811 NULL, 0, 1, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700812 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000813
Sathya Perlab31c50a2009-09-17 10:30:13 -0700814 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700815}
816
Jiri Pirko8e586132011-12-08 19:52:37 -0500817static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700818{
819 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000820 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000822 if (!be_physfn(adapter)) {
823 status = -EINVAL;
824 goto ret;
825 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000826
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700827 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000828 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000829 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500830
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000831 if (!status)
832 adapter->vlans_added++;
833 else
834 adapter->vlan_tag[vid] = 0;
835ret:
836 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700837}
838
Jiri Pirko8e586132011-12-08 19:52:37 -0500839static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840{
841 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000842 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000844 if (!be_physfn(adapter)) {
845 status = -EINVAL;
846 goto ret;
847 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000848
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700849 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000850 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000851 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500852
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000853 if (!status)
854 adapter->vlans_added--;
855 else
856 adapter->vlan_tag[vid] = 1;
857ret:
858 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700859}
860
Sathya Perlaa54769f2011-10-24 02:45:00 +0000861static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700862{
863 struct be_adapter *adapter = netdev_priv(netdev);
864
865 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000866 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000867 adapter->promiscuous = true;
868 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700869 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000870
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300871 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000872 if (adapter->promiscuous) {
873 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000874 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000875
876 if (adapter->vlans_added)
877 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000878 }
879
Sathya Perlae7b909a2009-11-22 22:01:10 +0000880 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000881 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000882 netdev_mc_count(netdev) > BE_MAX_MC) {
883 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000884 goto done;
885 }
886
Sathya Perla5b8821b2011-08-02 19:57:44 +0000887 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000888done:
889 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700890}
891
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000892static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
893{
894 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000895 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000896 int status;
897
Sathya Perla11ac75e2011-12-13 00:58:50 +0000898 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000899 return -EPERM;
900
Sathya Perla11ac75e2011-12-13 00:58:50 +0000901 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000902 return -EINVAL;
903
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000904 if (lancer_chip(adapter)) {
905 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
906 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000907 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
908 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000909
Sathya Perla11ac75e2011-12-13 00:58:50 +0000910 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
911 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000912 }
913
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000914 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000915 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
916 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000917 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000918 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000919
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000920 return status;
921}
922
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000923static int be_get_vf_config(struct net_device *netdev, int vf,
924 struct ifla_vf_info *vi)
925{
926 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000927 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000928
Sathya Perla11ac75e2011-12-13 00:58:50 +0000929 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000930 return -EPERM;
931
Sathya Perla11ac75e2011-12-13 00:58:50 +0000932 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000933 return -EINVAL;
934
935 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000936 vi->tx_rate = vf_cfg->tx_rate;
937 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000938 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000939 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000940
941 return 0;
942}
943
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000944static int be_set_vf_vlan(struct net_device *netdev,
945 int vf, u16 vlan, u8 qos)
946{
947 struct be_adapter *adapter = netdev_priv(netdev);
948 int status = 0;
949
Sathya Perla11ac75e2011-12-13 00:58:50 +0000950 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000951 return -EPERM;
952
Sathya Perla11ac75e2011-12-13 00:58:50 +0000953 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000954 return -EINVAL;
955
956 if (vlan) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000957 adapter->vf_cfg[vf].vlan_tag = vlan;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000958 adapter->vlans_added++;
959 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000960 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000961 adapter->vlans_added--;
962 }
963
964 status = be_vid_config(adapter, true, vf);
965
966 if (status)
967 dev_info(&adapter->pdev->dev,
968 "VLAN %d config on VF %d failed\n", vlan, vf);
969 return status;
970}
971
Ajit Khapardee1d18732010-07-23 01:52:13 +0000972static int be_set_vf_tx_rate(struct net_device *netdev,
973 int vf, int rate)
974{
975 struct be_adapter *adapter = netdev_priv(netdev);
976 int status = 0;
977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +0000979 return -EPERM;
980
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000981 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +0000982 return -EINVAL;
983
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000984 if (rate < 100 || rate > 10000) {
985 dev_err(&adapter->pdev->dev,
986 "tx rate must be between 100 and 10000 Mbps\n");
987 return -EINVAL;
988 }
Ajit Khapardee1d18732010-07-23 01:52:13 +0000989
Ajit Khaparde856c4012011-02-11 13:32:32 +0000990 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000991
992 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000993 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +0000994 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000995 else
996 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000997 return status;
998}
999
Sathya Perlaac124ff2011-07-25 19:10:14 +00001000static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001001{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001002 struct be_eq_obj *rx_eq = &rxo->rx_eq;
1003 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001004 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001005 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001006 u64 pkts;
1007 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001008
1009 if (!rx_eq->enable_aic)
1010 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001011
Sathya Perla4097f662009-03-24 16:40:13 -07001012 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001013 if (time_before(now, stats->rx_jiffies)) {
1014 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001015 return;
1016 }
1017
Sathya Perlaac124ff2011-07-25 19:10:14 +00001018 /* Update once a second */
1019 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001020 return;
1021
Sathya Perlaab1594e2011-07-25 19:10:15 +00001022 do {
1023 start = u64_stats_fetch_begin_bh(&stats->sync);
1024 pkts = stats->rx_pkts;
1025 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1026
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001027 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001028 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001029 stats->rx_jiffies = now;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001030 eqd = stats->rx_pps / 110000;
1031 eqd = eqd << 3;
1032 if (eqd > rx_eq->max_eqd)
1033 eqd = rx_eq->max_eqd;
1034 if (eqd < rx_eq->min_eqd)
1035 eqd = rx_eq->min_eqd;
1036 if (eqd < 10)
1037 eqd = 0;
1038 if (eqd != rx_eq->cur_eqd) {
1039 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
1040 rx_eq->cur_eqd = eqd;
1041 }
Sathya Perla4097f662009-03-24 16:40:13 -07001042}
1043
Sathya Perla3abcded2010-10-03 22:12:27 -07001044static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001045 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001046{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001047 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001048
Sathya Perlaab1594e2011-07-25 19:10:15 +00001049 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001050 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001051 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001052 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001053 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001054 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001055 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001056 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001057 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058}
1059
Sathya Perla2e588f82011-03-11 02:49:26 +00001060static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001061{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001062 /* L4 checksum is not reliable for non TCP/UDP packets.
1063 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001064 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1065 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001066}
1067
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001068static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001069get_rx_page_info(struct be_adapter *adapter,
1070 struct be_rx_obj *rxo,
1071 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001072{
1073 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001074 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001075
Sathya Perla3abcded2010-10-03 22:12:27 -07001076 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 BUG_ON(!rx_page_info->page);
1078
Ajit Khaparde205859a2010-02-09 01:34:21 +00001079 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001080 dma_unmap_page(&adapter->pdev->dev,
1081 dma_unmap_addr(rx_page_info, bus),
1082 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001083 rx_page_info->last_page_user = false;
1084 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085
1086 atomic_dec(&rxq->used);
1087 return rx_page_info;
1088}
1089
1090/* Throwaway the data in the Rx completion */
1091static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001092 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001093 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094{
Sathya Perla3abcded2010-10-03 22:12:27 -07001095 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001097 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001099 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001100 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001101 put_page(page_info->page);
1102 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001103 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001104 }
1105}
1106
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

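/*
 * The parse helpers below copy each field of a hardware RX completion
 * (v1 on BE3-native functions, v0 otherwise) into the driver's
 * be_rx_compl_info using the AMAP bit-field accessors, so the fast
 * path never has to touch the raw descriptor layout again.
 */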
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

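/*
 * Poll one RX completion off the CQ: check the valid bit, order the
 * read with rmb(), byte-swap the entry, parse it into rxo->rxcp and
 * then zero the valid bit so the entry is not seen again on wrap-around.
 */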
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

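/*
 * For multi-page allocations pass __GFP_COMP so the resulting compound
 * page can be refcounted per-fragment with get_page()/put_page().
 */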
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

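/*
 * TX completions follow the same valid-bit convention as RX: a zero
 * valid dword means the CQ is drained; otherwise byte-swap the entry,
 * clear the valid bit and advance the tail.
 */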
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

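/*
 * Walk the wrbs of one completed TX request: skip the header wrb, unmap
 * each fragment (the skb header mapping only once) and free the skb.
 * Returns the number of wrbs consumed, including the header wrb.
 */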
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

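/*
 * Drain the EQ, ack the consumed events and kick off NAPI. An interrupt
 * that arrives with no events is treated as spurious and the EQ is
 * rearmed unconditionally so further interrupts are not lost.
 */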
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

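/*
 * Drain TX completions for up to 200ms; anything still posted after the
 * timeout will never complete, so its wrbs are unmapped and freed by
 * walking the queue directly.
 */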
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

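/*
 * Multiple TX queues are used only on BE3-class PFs without SR-IOV;
 * VFs, Lancer, BE2 and multi-channel configs are limited to one queue.
 */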
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_enabled(adapter) && be_physfn(adapter) &&
	    !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

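/*
 * Legacy INTx handler. Lancer has no CEV_ISR register, so pending work
 * is discovered by peeking at the EQs; on BE2/BE3 the ISR register
 * indicates which EQs fired. Returns IRQ_NONE if the interrupt was not
 * ours.
 */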
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

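/* GRO is attempted only for error-free TCP completions */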
static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

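/*
 * NAPI poll for one RX queue: consume up to 'budget' completions,
 * dropping flush, zero-length and wrong-port entries, then replenish
 * the RX queue and rearm the CQ once everything is consumed.
 */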
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, true, 0);

		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
	}

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

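/*
 * Detect an unrecoverable hardware error: on Lancer via the SLIPORT
 * status/error registers, on BE via the masked UE status words in PCI
 * config space; log which functional block raised the error.
 */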
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

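/*
 * Ask for one vector per desired RX queue plus one for TX/MCC; if
 * pci_enable_msix() can only grant fewer vectors, retry with that
 * count, else fall back to INTx (num_msix_vec stays 0).
 */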
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait 1ms of grace
			 * time for all DMA to end and for the flush
			 * completion to arrive.
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

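/* Teardown mirrors be_open() in reverse: async MCC processing is stopped
 * first, interrupts are masked and NAPI is disabled, IRQs are synchronized
 * and unregistered, pending TX completions are drained, and finally the RX
 * queues are flushed.
 */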
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				adapter->if_handle,
				(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
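		/* Illustration (values are examples, not from the original
		 * source): with num_rx_qs == 5, i.e. one default ring plus
		 * four RSS rings whose rss_ids are r1..r4, the loops above
		 * fill the 128-entry indirection table as r1, r2, r3, r4,
		 * r1, r2, ... so hashed RX flows spread evenly across the
		 * RSS rings.
		 */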
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

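/* Enable or disable magic-packet Wake-on-LAN. Enabling programs the current
 * MAC address as the magic-packet filter and arms wake from D3hot/D3cold;
 * disabling programs a zeroed MAC and disarms both wake states.
 */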
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					  cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
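/* For example (illustrative values only): if the jhash-derived seed is
 * 02:00:00:10:20:30, VF0 is assigned ...:30, VF1 ...:31, VF2 ...:32, and
 * so on; only the last octet (mac[5]) is incremented per VF below.
 */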
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}

static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);

	if (status != 0)
		goto do_none;
	status = be_cmd_mac_addr_query(adapter, mac,
				       MAC_ADDRESS_TYPE_NETWORK,
				       false, adapter->if_handle, pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				 &adapter->pmac_id, 0);
do_none:
	return status;
}

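/* Bring-up sequence for the function: create TX/RX/MCC queues, query the
 * permanent MAC, create the interface with the appropriate capability
 * flags, create the TX queues in hardware, fix up the VF MAC where needed,
 * apply VLAN/RX-mode/flow-control settings, and finally set up SR-IOV VFs.
 */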
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* The VF's permanent MAC queried from the card is incorrect.
	 * For BEx: query the MAC configured by the PF using if_handle.
	 * For Lancer: get and use mac_list to obtain the MAC address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_configure_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (sriov_enabled(adapter)) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rxo, i)
		event_handle(adapter, &rxo->rx_eq, true);
}
#endif

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

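/* Flash each component of a UFI image. The gen2/gen3 tables below map each
 * component type to its offset and maximum size in the flash layout; every
 * component is written in 32KB chunks, with the final chunk issued as a
 * FLASH (commit) operation rather than a SAVE.
 */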
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

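/* Lancer firmware download: the image is streamed to the "/prg" object in
 * 32KB chunks via write-object commands; a final zero-length write at the
 * end offset commits the downloaded image.
 */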
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

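/* Map the PCI BARs this function needs: Lancer exposes only a doorbell
 * region in BAR 0; on BE2/BE3 the PF also maps the CSR region from BAR 2,
 * and the doorbell region sits in BAR 4 (BAR 0 for gen-3 VFs).
 */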
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

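/* Allocate control structures. The bootstrap mailbox must be 16-byte
 * aligned, so 16 extra bytes are allocated and aligned va/dma pointers are
 * derived from the raw allocation.
 */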
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

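/* Poll the SLIPORT status register until the firmware reports ready,
 * checking once a second for up to 30 seconds.
 */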
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. "
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

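/* Periodic (1s) housekeeping: check for Lancer function errors and
 * unrecoverable errors, reap MCC completions while interrupts are off,
 * refresh statistics, adapt the RX EQ delay, and replenish any RX rings
 * that ran out of posted buffers.
 */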
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

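/* PCI probe: enable the device, map BARs, bring the function to a ready
 * state (with Lancer-specific recovery if needed), issue POST, fw_init and
 * reset_function, then create queues and register the net_device.
 */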
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

3646static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3647{
3648 struct be_adapter *adapter = pci_get_drvdata(pdev);
3649 struct net_device *netdev = adapter->netdev;
3650
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003651 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003652 if (adapter->wol)
3653 be_setup_wol(adapter, true);
3654
	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

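/* Resume reverses be_suspend(): power the device back up, re-init the
 * FW command channel, and recreate the resources that be_clear() freed.
 */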
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

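	/* Recreate the rings and interface state that be_clear() freed
	 * in be_suspend().
	 */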
	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/* A Function Level Reset (FLR) stops BE from DMAing any further data;
 * reset the function here so the device is quiesced before shutdown
 * (e.g. ahead of a kexec'ed kernel taking over).
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

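/* PCI error recovery callbacks (EEH on IBM pseries platforms):
 * error_detected() quiesces the driver, slot_reset() re-initializes the
 * device after the platform resets the slot, and resume() restores
 * traffic. See Documentation/PCI/pci-error-recovery.txt.
 */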
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

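	/* A permanent failure means the slot cannot be recovered; ask the
	 * PCI error core to disconnect the device instead of resetting it.
	 */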
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

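/* Called once the platform has reset the slot: re-enable the function
 * and run POST to verify that the chip and its FW came back healthy.
 */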
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

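/* Final recovery stage: re-init the FW command channel, redo the full
 * setup, and reattach the netdev so traffic can flow again.
 */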
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

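/* Hook up the PM and error-recovery entry points; without .err_handler
 * the EEH core cannot coordinate recovery with this driver after a bus
 * error.
 */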
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
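	/* Only 2048/4096/8192-byte RX fragments are supported; fall back
	 * to the 2048 default (with a warning) rather than failing the
	 * module load on a bad parameter.
	 */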
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);