/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
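
/*
 * Illustrative module load (module name assumed to be be2net, values
 * are just an example): "modprobe be2net rx_frag_size=4096 num_vfs=2".
 * Both parameters are read-only in sysfs (S_IRUGO) and therefore take
 * effect only at load time.
 */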

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
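
/*
 * A BE ring is just len * entry_size bytes of zeroed coherent DMA
 * memory. be_queue_free() is the exact inverse and tolerates queues
 * whose allocation failed, since mem->va is checked before freeing.
 */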

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

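/*
 * Doorbell helpers. Each doorbell write packs the ring id into the low
 * bits of a 32-bit word together with a count (entries posted or
 * completions popped) and, for EQs/CQs, re-arm/clear-interrupt flags.
 * The wmb() in the RQ/TXQ paths ensures the ring entries written above
 * are visible to the device before the doorbell is rung.
 */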
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
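
/*
 * Note the ordering above: the old pmac entry is deleted before the
 * new one is added, so a failed delete leaves the interface address
 * unchanged. Only the PF touches the hardware filter; VFs fall
 * straight through to the netdev copy.
 */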

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events =
			rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events =
			rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

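/*
 * Lancer firmware reports most port counters as {hi, lo} 32-bit word
 * pairs; make_64bit_val() (defined elsewhere) presumably stitches them
 * into one u64, i.e. ((u64)hi << 32) | lo, so a wrapped low word is
 * still accounted for correctly.
 */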
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
				pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
						pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
				pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
				pport_stats->rx_frames_too_long_lo);
372 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
373 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
374 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
375 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
376 drvs->rx_dropped_tcp_length =
377 pport_stats->rx_dropped_invalid_tcp_length;
378 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
379 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
380 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
381 drvs->rx_dropped_header_too_small =
382 pport_stats->rx_dropped_header_too_small;
383 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
384 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
385 drvs->rx_alignment_symbol_errors =
386 make_64bit_val(pport_stats->rx_symbol_errors_hi,
387 pport_stats->rx_symbol_errors_lo);
388 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
389 drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
390 pport_stats->tx_pause_frames_lo);
391 drvs->tx_controlframes =
392 make_64bit_val(pport_stats->tx_control_frames_hi,
393 pport_stats->tx_control_frames_lo);
394 drvs->jabber_events = pport_stats->rx_jabbers;
395 drvs->rx_drops_no_pbuf = 0;
396 drvs->rx_drops_no_txpb = 0;
397 drvs->rx_drops_no_erx_descr = 0;
398 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
399 drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
400 pport_stats->num_forwards_lo);
401 drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
402 pport_stats->rx_drops_mtu_lo);
403 drvs->rx_drops_no_tpre_descr = 0;
404 drvs->rx_drops_too_many_frags =
405 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
406 pport_stats->rx_drops_too_many_frags_lo);
407}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408
void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		pkts += rx_stats(rxo)->rx_pkts;
		bytes += rx_stats(rxo)->rx_bytes;
		mcast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx =
					be_erx_stats_from_cmd(adapter);
				drops += erx->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx =
				be_erx_stats_from_cmd(adapter);
			drops += erx->rx_drops_no_fragments[rxo->q.id];
		}
	}
	dev_stats->rx_packets = pkts;
	dev_stats->rx_bytes = bytes;
	dev_stats->multicast = mcast;
	dev_stats->rx_dropped = drops;

	pkts = bytes = 0;
	for_all_tx_queues(adapter, txo, i) {
		pkts += tx_stats(txo)->be_tx_pkts;
		bytes += tx_stats(txo)->be_tx_bytes;
	}
	dev_stats->tx_packets = pkts;
	dev_stats->tx_bytes = bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
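
/*
 * Worked example of the heuristic above, assuming ~1.1M rx frags/sec:
 * eqd = (1100000 / 110000) << 3 = 80. The value is then clamped to
 * [min_eqd, max_eqd], and anything below 10 (roughly under 137K
 * frags/sec) is treated as too idle to delay and forced to 0.
 */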

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}
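
/*
 * Example: 250,000,000 bytes observed over 2 * HZ ticks gives
 * 125,000,000 bytes/sec, i.e. 1,000,000,000 bits/sec, so
 * be_calc_rate() returns 1000 (Mbits/sec).
 */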

static void be_tx_rate_update(struct be_tx_obj *txo)
{
	struct be_tx_stats *stats = tx_stats(txo);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						 - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
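
/*
 * The WRB count is: 1 for the header WRB, 1 for the linear part of the
 * skb (when present), and 1 per page fragment. On non-Lancer chips an
 * odd total is padded with a zero-length dummy WRB, which suggests the
 * hardware consumes TX WRBs in aligned pairs on those ASICs.
 */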

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
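
/*
 * Illustration of the priority remap above: for a tag of 0x6005
 * (PCP 3, VID 5) with bit 3 clear in vlan_prio_bmap, the PCP field
 * (top 3 bits) is overwritten with adapter->recommended_prio while
 * VID 5 is kept intact.
 */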

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
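
/*
 * In the dma_err path the queue head is rewound to map_head (the slot
 * right after the header WRB) and every mapping done so far is undone.
 * Only the first data WRB can be a single mapping, so map_single is
 * cleared after the first iteration; the rest were page mappings.
 */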

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
			&adapter->mc_cmd_mem);
done:
	return;
}
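
/*
 * Filtering above degrades gracefully: full promiscuous when
 * IFF_PROMISC is set, multicast-promiscuous (a NULL list) when
 * IFF_ALLMULTI is set or more than BE_MAX_MC groups are joined, and an
 * exact multicast list otherwise.
 */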

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
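
/*
 * RX buffers are carved out of "big" pages (adapter->big_page_size),
 * several rx_frag_size fragments per page; the DMA mapping for a page
 * is torn down only when its last fragment (last_page_user) is
 * reclaimed from the ring.
 */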

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
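
/*
 * Layout note: at most BE_HDR_LEN bytes land in the skb linear area
 * (enough for protocol headers); everything else stays in page frags,
 * and consecutive frags that share a physical page are folded into a
 * single frag slot, with put_page() dropping the extra references.
 */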

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}
1214
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001215/* Process the RX completion indicated by rxcp when GRO is enabled */
1216static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001217 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001218 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001219{
1220 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001221 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001222 struct be_queue_info *rxq = &rxo->q;
1223 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001224 u16 remaining, curr_frag_len;
1225 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001226
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001227 skb = napi_get_frags(&eq_obj->napi);
1228 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001229 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001230 return;
1231 }
1232
Sathya Perla2e588f82011-03-11 02:49:26 +00001233 remaining = rxcp->pkt_size;
1234 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1235 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001236
1237 curr_frag_len = min(remaining, rx_frag_size);
1238
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001239 /* Coalesce all frags from the same physical page in one slot */
1240 if (i == 0 || page_info->page_offset == 0) {
1241 /* First frag or Fresh page */
1242 j++;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001243 skb_shinfo(skb)->frags[j].page = page_info->page;
1244 skb_shinfo(skb)->frags[j].page_offset =
1245 page_info->page_offset;
1246 skb_shinfo(skb)->frags[j].size = 0;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001247 } else {
1248 put_page(page_info->page);
1249 }
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001250 skb_shinfo(skb)->frags[j].size += curr_frag_len;
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001251
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001252 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001253 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001254 memset(page_info, 0, sizeof(*page_info));
1255 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001256 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001257
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001258 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001259 skb->len = rxcp->pkt_size;
1260 skb->data_len = rxcp->pkt_size;
1261 skb->truesize += rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001262 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001263 if (adapter->netdev->features & NETIF_F_RXHASH)
1264 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001265
Sathya Perla2e588f82011-03-11 02:49:26 +00001266 if (likely(!rxcp->vlanf))
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001267 napi_gro_frags(&eq_obj->napi);
Sathya Perla2e588f82011-03-11 02:49:26 +00001268 else
Somnath Kotur6709d952011-05-04 22:40:46 +00001269 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1270 rxcp->vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271}
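
/*
 * Sketch of the napi_get_frags()/napi_gro_frags() contract relied on
 * above: the skb belongs to the NAPI instance; the driver only attaches
 * page fragments and lengths, and napi_gro_frags() (or vlan_gro_frags()
 * for tagged traffic) pulls the Ethernet header and hands the packet to
 * the GRO engine.
 */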
1272
Sathya Perla2e588f82011-03-11 02:49:26 +00001273static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1274 struct be_eth_rx_compl *compl,
1275 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001276{
Sathya Perla2e588f82011-03-11 02:49:26 +00001277 rxcp->pkt_size =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1279 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1280 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1281 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001282 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001283 rxcp->ip_csum =
1284 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1285 rxcp->l4_csum =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1287 rxcp->ipv6 =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1289 rxcp->rxq_idx =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1291 rxcp->num_rcvd =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1293 rxcp->pkt_type =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001295 rxcp->rss_hash =
 1296 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001297 if (rxcp->vlanf) {
1298 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001299 compl);
1300 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1301 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001302 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001303}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001304
Sathya Perla2e588f82011-03-11 02:49:26 +00001305static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1306 struct be_eth_rx_compl *compl,
1307 struct be_rx_compl_info *rxcp)
1308{
1309 rxcp->pkt_size =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1311 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1312 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1313 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001314 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 rxcp->ip_csum =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1317 rxcp->l4_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1319 rxcp->ipv6 =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1321 rxcp->rxq_idx =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1323 rxcp->num_rcvd =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1325 rxcp->pkt_type =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001327 rxcp->rss_hash =
 1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001329 if (rxcp->vlanf) {
1330 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001331 compl);
1332 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1333 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001334 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001335}
1336
1337static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1338{
1339 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1340 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1341 struct be_adapter *adapter = rxo->adapter;
1342
 1343 /* For checking the valid bit it is OK to use either definition, as the
 1344 * valid bit is at the same position in both v0 and v1 Rx compls */
1345 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346 return NULL;
1347
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001348 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001349 be_dws_le_to_cpu(compl, sizeof(*compl));
1350
1351 if (adapter->be3_native)
1352 be_parse_rx_compl_v1(adapter, compl, rxcp);
1353 else
1354 be_parse_rx_compl_v0(adapter, compl, rxcp);
1355
Sathya Perla15d72182011-03-21 20:49:26 +00001356 if (rxcp->vlanf) {
 1357 /* vlanf could be wrongly set in some cards. Ignore it if
 1358 * vtm is not set (0x400 is the multi-channel/FLEX10 mode bit) */
1359 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1360 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001361
Sathya Perla15d72182011-03-21 20:49:26 +00001362 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001363 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001364
David S. Miller3c709f82011-05-11 14:26:15 -04001365 if (((adapter->pvid & VLAN_VID_MASK) ==
1366 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1367 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001368 rxcp->vlanf = 0;
1369 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001370
 1371 /* As the compl has been parsed, reset it; we won't touch it again */
1372 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001373
Sathya Perla3abcded2010-10-03 22:12:27 -07001374 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375 return rxcp;
1376}
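
/*
 * Illustration only (not driver code): AMAP_GET_BITS() extracts a named
 * bit-field from the little-endian dword array of a completion entry,
 * which is also why the valid-bit test above indexes dw[] with
 * offsetof()/32. A hypothetical helper doing the same arithmetic, for a
 * field narrower than 32 bits that does not straddle a dword boundary,
 * could look like:
 *
 *	static inline u32 example_get_bits(const u32 *dw, u32 bit_off,
 *					   u32 width)
 *	{
 *		return (dw[bit_off / 32] >> (bit_off % 32)) &
 *			((1U << width) - 1);
 *	}
 */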
1377
Eric Dumazet1829b082011-03-01 05:48:12 +00001378static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001381
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001383 gfp |= __GFP_COMP;
1384 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385}
1386
1387/*
 1388 * Allocate a page, split it into fragments of size rx_frag_size and post as
1389 * receive buffers to BE
1390 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001391static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392{
Sathya Perla3abcded2010-10-03 22:12:27 -07001393 struct be_adapter *adapter = rxo->adapter;
1394 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001395 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001396 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 struct page *pagep = NULL;
1398 struct be_eth_rx_d *rxd;
1399 u64 page_dmaaddr = 0, frag_dmaaddr;
1400 u32 posted, page_offset = 0;
1401
Sathya Perla3abcded2010-10-03 22:12:27 -07001402 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1404 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001405 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406 if (unlikely(!pagep)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001407 rxo->stats.rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408 break;
1409 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001410 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1411 0, adapter->big_page_size,
1412 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413 page_info->page_offset = 0;
1414 } else {
1415 get_page(pagep);
1416 page_info->page_offset = page_offset + rx_frag_size;
1417 }
1418 page_offset = page_info->page_offset;
1419 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001420 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1422
1423 rxd = queue_head_node(rxq);
1424 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1425 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001426
1427 /* Any space left in the current big page for another frag? */
1428 if ((page_offset + rx_frag_size + rx_frag_size) >
1429 adapter->big_page_size) {
1430 pagep = NULL;
1431 page_info->last_page_user = true;
1432 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001433
1434 prev_page_info = page_info;
1435 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436 page_info = &page_info_tbl[rxq->head];
1437 }
1438 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001439 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440
1441 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001442 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001443 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001444 } else if (atomic_read(&rxq->used) == 0) {
1445 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001446 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448}
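
/*
 * Worked example (a sketch, assuming 4 KB pages and the default
 * rx_frag_size of 2048): get_order(2048) == 0, so big_page_size is one
 * page (4096 bytes) and each allocated page is carved into two
 * fragments at offsets 0 and 2048, with the second marked
 * last_page_user. A 9000-byte jumbo frame then spans five fragments,
 * which skb_fill_rx_data() coalesces per page.
 */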
1449
Sathya Perla5fb379e2009-06-18 00:02:59 +00001450static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1453
1454 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1455 return NULL;
1456
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001457 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1459
1460 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1461
1462 queue_tail_inc(tx_cq);
1463 return txcp;
1464}
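
/*
 * Note on the pattern above (the RX and EQ consumers follow it too):
 * the valid bit is tested first, rmb() then prevents the CPU from
 * reading the rest of the entry before hardware has finished writing
 * it, and clearing the valid bit recycles the ring slot for reuse.
 */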
1465
Sathya Perla3c8def92011-06-12 20:01:58 +00001466static u16 be_tx_compl_process(struct be_adapter *adapter,
1467 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001468{
Sathya Perla3c8def92011-06-12 20:01:58 +00001469 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001470 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001471 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001473 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1474 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001475
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001476 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001477 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001478 sent_skbs[txq->tail] = NULL;
1479
1480 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001481 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001483 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001485 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001486 unmap_tx_frag(&adapter->pdev->dev, wrb,
1487 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001488 unmap_skb_hdr = false;
1489
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490 num_wrbs++;
1491 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001492 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001495 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496}
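
/*
 * Example of the accounting above (a sketch): an skb with linear data
 * plus two page frags was posted as one hdr wrb and three data wrbs;
 * the walk unmaps the header once (unmap_skb_hdr) and then each data
 * wrb until txq->tail reaches last_index, returning num_wrbs == 4.
 */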
1497
Sathya Perla859b1e42009-08-10 03:43:51 +00001498static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1499{
1500 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1501
1502 if (!eqe->evt)
1503 return NULL;
1504
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001505 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001506 eqe->evt = le32_to_cpu(eqe->evt);
1507 queue_tail_inc(&eq_obj->q);
1508 return eqe;
1509}
1510
1511static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001512 struct be_eq_obj *eq_obj,
1513 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001514{
1515 struct be_eq_entry *eqe;
1516 u16 num = 0;
1517
1518 while ((eqe = event_get(eq_obj)) != NULL) {
1519 eqe->evt = 0;
1520 num++;
1521 }
1522
1523 /* Deal with any spurious interrupts that come
1524 * without events
1525 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001526 if (!num)
1527 rearm = true;
1528
1529 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001530 if (num)
1531 napi_schedule(&eq_obj->napi);
1532
1533 return num;
1534}
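
/*
 * Usage sketch, matching the ISRs further below: RX vectors re-arm the
 * EQ immediately, while the TX/MCC vector defers re-arming to its NAPI
 * handler:
 *
 *	event_handle(adapter, &rxo->rx_eq, true);
 *	event_handle(adapter, &adapter->tx_eq, false);
 */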
1535
1536/* Just read and notify events without processing them.
1537 * Used at the time of destroying event queues */
1538static void be_eq_clean(struct be_adapter *adapter,
1539 struct be_eq_obj *eq_obj)
1540{
1541 struct be_eq_entry *eqe;
1542 u16 num = 0;
1543
1544 while ((eqe = event_get(eq_obj)) != NULL) {
1545 eqe->evt = 0;
1546 num++;
1547 }
1548
1549 if (num)
1550 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1551}
1552
Sathya Perla3abcded2010-10-03 22:12:27 -07001553static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554{
1555 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001556 struct be_queue_info *rxq = &rxo->q;
1557 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001558 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001559 u16 tail;
1560
 1561 /* First clean up pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001562 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1563 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001564 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565 }
1566
 1567 /* Then free posted rx buffers that were not used */
1568 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001569 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001570 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571 put_page(page_info->page);
1572 memset(page_info, 0, sizeof(*page_info));
1573 }
1574 BUG_ON(atomic_read(&rxq->used));
1575}
1576
Sathya Perla3c8def92011-06-12 20:01:58 +00001577static void be_tx_compl_clean(struct be_adapter *adapter,
1578 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579{
Sathya Perla3c8def92011-06-12 20:01:58 +00001580 struct be_queue_info *tx_cq = &txo->cq;
1581 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001582 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001583 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001584 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001585 struct sk_buff *sent_skb;
1586 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587
Sathya Perlaa8e91792009-08-10 03:42:43 +00001588 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1589 do {
1590 while ((txcp = be_tx_compl_get(tx_cq))) {
1591 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1592 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001593 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001594 cmpl++;
1595 }
1596 if (cmpl) {
1597 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001598 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001599 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001600 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001601 }
1602
1603 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1604 break;
1605
1606 mdelay(1);
1607 } while (true);
1608
1609 if (atomic_read(&txq->used))
1610 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1611 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001612
1613 /* free posted tx for which compls will never arrive */
1614 while (atomic_read(&txq->used)) {
1615 sent_skb = sent_skbs[txq->tail];
1616 end_idx = txq->tail;
1617 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001618 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1619 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001620 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001621 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001622 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001623}
1624
Sathya Perla5fb379e2009-06-18 00:02:59 +00001625static void be_mcc_queues_destroy(struct be_adapter *adapter)
1626{
1627 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001628
Sathya Perla8788fdc2009-07-27 22:52:03 +00001629 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001630 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001631 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001632 be_queue_free(adapter, q);
1633
Sathya Perla8788fdc2009-07-27 22:52:03 +00001634 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001635 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001636 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001637 be_queue_free(adapter, q);
1638}
1639
1640/* Must be called only after TX qs are created as MCC shares TX EQ */
1641static int be_mcc_queues_create(struct be_adapter *adapter)
1642{
1643 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001644
1645 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001646 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001647 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001648 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001649 goto err;
1650
1651 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001652 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001653 goto mcc_cq_free;
1654
1655 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001656 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001657 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1658 goto mcc_cq_destroy;
1659
1660 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001661 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001662 goto mcc_q_free;
1663
1664 return 0;
1665
1666mcc_q_free:
1667 be_queue_free(adapter, q);
1668mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001669 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001670mcc_cq_free:
1671 be_queue_free(adapter, cq);
1672err:
1673 return -1;
1674}
1675
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001676static void be_tx_queues_destroy(struct be_adapter *adapter)
1677{
1678 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001679 struct be_tx_obj *txo;
1680 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001681
Sathya Perla3c8def92011-06-12 20:01:58 +00001682 for_all_tx_queues(adapter, txo, i) {
1683 q = &txo->q;
1684 if (q->created)
1685 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1686 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687
Sathya Perla3c8def92011-06-12 20:01:58 +00001688 q = &txo->cq;
1689 if (q->created)
1690 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1691 be_queue_free(adapter, q);
1692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693
Sathya Perla859b1e42009-08-10 03:43:51 +00001694 /* Clear any residual events */
1695 be_eq_clean(adapter, &adapter->tx_eq);
1696
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697 q = &adapter->tx_eq.q;
1698 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001699 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001700 be_queue_free(adapter, q);
1701}
1702
Sathya Perla3c8def92011-06-12 20:01:58 +00001703/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704static int be_tx_queues_create(struct be_adapter *adapter)
1705{
1706 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001707 struct be_tx_obj *txo;
1708 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709
1710 adapter->tx_eq.max_eqd = 0;
1711 adapter->tx_eq.min_eqd = 0;
1712 adapter->tx_eq.cur_eqd = 96;
1713 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001714
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001716 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1717 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718 return -1;
1719
Sathya Perla8788fdc2009-07-27 22:52:03 +00001720 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001721 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001722 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001723
Sathya Perla3c8def92011-06-12 20:01:58 +00001724 for_all_tx_queues(adapter, txo, i) {
1725 cq = &txo->cq;
1726 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001728 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001729
Sathya Perla3c8def92011-06-12 20:01:58 +00001730 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1731 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001732
Sathya Perla3c8def92011-06-12 20:01:58 +00001733 q = &txo->q;
1734 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1735 sizeof(struct be_eth_wrb)))
1736 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001737
Sathya Perla3c8def92011-06-12 20:01:58 +00001738 if (be_cmd_txq_create(adapter, q, cq))
1739 goto err;
1740 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001741 return 0;
1742
Sathya Perla3c8def92011-06-12 20:01:58 +00001743err:
1744 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001745 return -1;
1746}
1747
1748static void be_rx_queues_destroy(struct be_adapter *adapter)
1749{
1750 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001751 struct be_rx_obj *rxo;
1752 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753
Sathya Perla3abcded2010-10-03 22:12:27 -07001754 for_all_rx_queues(adapter, rxo, i) {
1755 q = &rxo->q;
1756 if (q->created) {
1757 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1758 /* After the rxq is invalidated, wait for a grace time
 1759 * of 1ms for all DMA to end and the flush compl to
1760 * arrive
1761 */
1762 mdelay(1);
1763 be_rx_q_clean(adapter, rxo);
1764 }
1765 be_queue_free(adapter, q);
Sathya Perla89420422010-02-17 01:35:26 +00001766
Sathya Perla3abcded2010-10-03 22:12:27 -07001767 q = &rxo->cq;
1768 if (q->created)
1769 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1770 be_queue_free(adapter, q);
1771
1772 /* Clear any residual events */
1773 q = &rxo->rx_eq.q;
1774 if (q->created) {
1775 be_eq_clean(adapter, &rxo->rx_eq);
1776 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1777 }
1778 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001779 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001780}
1781
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001782static u32 be_num_rxqs_want(struct be_adapter *adapter)
1783{
Sathya Perlac814fd32011-06-26 20:41:25 +00001784 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001785 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1786 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1787 } else {
1788 dev_warn(&adapter->pdev->dev,
1789 "No support for multiple RX queues\n");
1790 return 1;
1791 }
1792}
1793
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001794static int be_rx_queues_create(struct be_adapter *adapter)
1795{
1796 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001797 struct be_rx_obj *rxo;
1798 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001799
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001800 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1801 msix_enabled(adapter) ?
1802 adapter->num_msix_vec - 1 : 1);
1803 if (adapter->num_rx_qs != MAX_RX_QS)
1804 dev_warn(&adapter->pdev->dev,
 1805 "Can create only %d RX queues\n", adapter->num_rx_qs);
1806
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001807 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001808 for_all_rx_queues(adapter, rxo, i) {
1809 rxo->adapter = adapter;
1810 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1811 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812
Sathya Perla3abcded2010-10-03 22:12:27 -07001813 /* EQ */
1814 eq = &rxo->rx_eq.q;
1815 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1816 sizeof(struct be_eq_entry));
1817 if (rc)
1818 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819
Sathya Perla3abcded2010-10-03 22:12:27 -07001820 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1821 if (rc)
1822 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001824 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001825
Sathya Perla3abcded2010-10-03 22:12:27 -07001826 /* CQ */
1827 cq = &rxo->cq;
1828 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1829 sizeof(struct be_eth_rx_compl));
1830 if (rc)
1831 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832
Sathya Perla3abcded2010-10-03 22:12:27 -07001833 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1834 if (rc)
1835 goto err;
Sathya Perla3abcded2010-10-03 22:12:27 -07001836 /* Rx Q */
1837 q = &rxo->q;
1838 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1839 sizeof(struct be_eth_rx_d));
1840 if (rc)
1841 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001842
Sathya Perla3abcded2010-10-03 22:12:27 -07001843 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1844 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
 1845 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1846 if (rc)
1847 goto err;
1848 }
1849
1850 if (be_multi_rxq(adapter)) {
1851 u8 rsstable[MAX_RSS_QS];
1852
1853 for_all_rss_queues(adapter, rxo, i)
1854 rsstable[i] = rxo->rss_id;
1855
1856 rc = be_cmd_rss_config(adapter, rsstable,
1857 adapter->num_rx_qs - 1);
1858 if (rc)
1859 goto err;
1860 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861
1862 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001863err:
1864 be_rx_queues_destroy(adapter);
1865 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866}
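
/*
 * Sketch of the RSS wiring above (assuming RSS-capable hardware):
 * queue 0 is created with RSS disabled and acts as the default queue;
 * every further queue is created with RSS enabled and reports an
 * rss_id, and rsstable[] hands those ids to be_cmd_rss_config() to
 * program the firmware's hash-indirection table.
 */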
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001868static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001869{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001870 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1871 if (!eqe->evt)
1872 return false;
1873 else
1874 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001875}
1876
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001877static irqreturn_t be_intx(int irq, void *dev)
1878{
1879 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001880 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001881 int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001882
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001883 if (lancer_chip(adapter)) {
1884 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001885 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001886 for_all_rx_queues(adapter, rxo, i) {
1887 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001888 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001889 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001890
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001891 if (!(tx || rx))
1892 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001893
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001894 } else {
1895 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1896 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1897 if (!isr)
1898 return IRQ_NONE;
1899
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001900 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001901 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001902
1903 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001904 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001905 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001906 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001907 }
Sathya Perlac001c212009-07-01 01:06:07 +00001908
Sathya Perla8788fdc2009-07-27 22:52:03 +00001909 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001910}
1911
1912static irqreturn_t be_msix_rx(int irq, void *dev)
1913{
Sathya Perla3abcded2010-10-03 22:12:27 -07001914 struct be_rx_obj *rxo = dev;
1915 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916
Sathya Perla3c8def92011-06-12 20:01:58 +00001917 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001918
1919 return IRQ_HANDLED;
1920}
1921
Sathya Perla5fb379e2009-06-18 00:02:59 +00001922static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001923{
1924 struct be_adapter *adapter = dev;
1925
Sathya Perla3c8def92011-06-12 20:01:58 +00001926 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001927
1928 return IRQ_HANDLED;
1929}
1930
Sathya Perla2e588f82011-03-11 02:49:26 +00001931static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001932{
Sathya Perla2e588f82011-03-11 02:49:26 +00001933 return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001934}
1935
stephen hemminger49b05222010-10-21 07:50:48 +00001936static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001937{
1938 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001939 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1940 struct be_adapter *adapter = rxo->adapter;
1941 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001942 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001943 u32 work_done;
1944
Sathya Perla3abcded2010-10-03 22:12:27 -07001945 rxo->stats.rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001946 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001947 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948 if (!rxcp)
1949 break;
1950
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001951 /* Ignore flush completions */
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001952 if (rxcp->num_rcvd && rxcp->pkt_size) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001953 if (do_gro(rxcp))
Sathya Perla64642812010-12-01 01:04:17 +00001954 be_rx_compl_process_gro(adapter, rxo, rxcp);
1955 else
1956 be_rx_compl_process(adapter, rxo, rxcp);
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001957 } else if (rxcp->pkt_size == 0) {
1958 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001959 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001960
Sathya Perla2e588f82011-03-11 02:49:26 +00001961 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001962 }
1963
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001964 /* Refill the queue */
Sathya Perla3abcded2010-10-03 22:12:27 -07001965 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001966 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967
1968 /* All consumed */
1969 if (work_done < budget) {
1970 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001971 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972 } else {
1973 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001974 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001975 }
1976 return work_done;
1977}
1978
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001979/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1980 * For TX/MCC we don't honour budget; consume everything
1981 */
1982static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001983{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001984 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1985 struct be_adapter *adapter =
1986 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001987 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001989 int tx_compl, mcc_compl, status = 0;
1990 u8 i;
1991 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001992
Sathya Perla3c8def92011-06-12 20:01:58 +00001993 for_all_tx_queues(adapter, txo, i) {
1994 tx_compl = 0;
1995 num_wrbs = 0;
1996 while ((txcp = be_tx_compl_get(&txo->cq))) {
1997 num_wrbs += be_tx_compl_process(adapter, txo,
1998 AMAP_GET_BITS(struct amap_eth_tx_compl,
1999 wrb_index, txcp));
2000 tx_compl++;
2001 }
2002 if (tx_compl) {
2003 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2004
2005 atomic_sub(num_wrbs, &txo->q.used);
2006
2007 /* As Tx wrbs have been freed up, wake up netdev queue
2008 * if it was stopped due to lack of tx wrbs. */
2009 if (__netif_subqueue_stopped(adapter->netdev, i) &&
2010 atomic_read(&txo->q.used) < txo->q.len / 2) {
2011 netif_wake_subqueue(adapter->netdev, i);
2012 }
2013
2014 adapter->drv_stats.be_tx_events++;
2015 txo->stats.be_tx_compl += tx_compl;
2016 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017 }
2018
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002019 mcc_compl = be_process_mcc(adapter, &status);
2020
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002021 if (mcc_compl) {
2022 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2023 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2024 }
2025
Sathya Perla3c8def92011-06-12 20:01:58 +00002026 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002027
Sathya Perla3c8def92011-06-12 20:01:58 +00002028 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002029 return 1;
2030}
2031
Ajit Khaparded053de92010-09-03 06:23:30 +00002032void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002033{
2034 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2035 u32 i;
2036
2037 pci_read_config_dword(adapter->pdev,
2038 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2039 pci_read_config_dword(adapter->pdev,
2040 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2041 pci_read_config_dword(adapter->pdev,
2042 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2043 pci_read_config_dword(adapter->pdev,
2044 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2045
2046 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2047 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2048
Ajit Khaparded053de92010-09-03 06:23:30 +00002049 if (ue_status_lo || ue_status_hi) {
2050 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002051 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00002052 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2053 }
2054
Ajit Khaparde7c185272010-07-29 06:16:33 +00002055 if (ue_status_lo) {
2056 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2057 if (ue_status_lo & 1)
2058 dev_err(&adapter->pdev->dev,
2059 "UE: %s bit set\n", ue_status_low_desc[i]);
2060 }
2061 }
2062 if (ue_status_hi) {
2063 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2064 if (ue_status_hi & 1)
2065 dev_err(&adapter->pdev->dev,
2066 "UE: %s bit set\n", ue_status_hi_desc[i]);
2067 }
2068 }
2069
2070}
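
/*
 * Decode example (hypothetical register values): an unmasked
 * ue_status_lo of 0x00000005 has bits 0 and 2 set, so the loops above
 * would log "UE: CEV bit set" and "UE: DBUF bit set" from
 * ue_status_low_desc[].
 */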
2071
Sathya Perlaea1dae12009-03-19 23:56:20 -07002072static void be_worker(struct work_struct *work)
2073{
2074 struct be_adapter *adapter =
2075 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07002076 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002077 struct be_tx_obj *txo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002078 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002079
Sathya Perla16da8252011-03-21 20:49:27 +00002080 if (!adapter->ue_detected && !lancer_chip(adapter))
2081 be_detect_dump_ue(adapter);
2082
Somnath Koturf203af72010-10-25 23:01:03 +00002083 /* When interrupts are not yet enabled, just reap any pending
 2084 * MCC completions */
2085 if (!netif_running(adapter->netdev)) {
2086 int mcc_compl, status = 0;
2087
2088 mcc_compl = be_process_mcc(adapter, &status);
2089
2090 if (mcc_compl) {
2091 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2092 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2093 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00002094
Somnath Koturf203af72010-10-25 23:01:03 +00002095 goto reschedule;
2096 }
2097
Selvin Xavier005d5692011-05-16 07:36:35 +00002098 if (!adapter->stats_cmd_sent) {
2099 if (lancer_chip(adapter))
2100 lancer_cmd_get_pport_stats(adapter,
2101 &adapter->stats_cmd);
2102 else
2103 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2104 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002105
2106 for_all_tx_queues(adapter, txo, i)
2107 be_tx_rate_update(txo);
Sathya Perla4097f662009-03-24 16:40:13 -07002108
Sathya Perla3abcded2010-10-03 22:12:27 -07002109 for_all_rx_queues(adapter, rxo, i) {
2110 be_rx_rate_update(rxo);
2111 be_rx_eqd_update(adapter, rxo);
2112
2113 if (rxo->rx_post_starved) {
2114 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002115 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002116 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002117 }
2118
Somnath Koturf203af72010-10-25 23:01:03 +00002119reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002120 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002121 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2122}
2123
Sathya Perla8d56ff12009-11-22 22:02:26 +00002124static void be_msix_disable(struct be_adapter *adapter)
2125{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002126 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002127 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002128 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002129 }
2130}
2131
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132static void be_msix_enable(struct be_adapter *adapter)
2133{
Sathya Perla3abcded2010-10-03 22:12:27 -07002134#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002135 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002136
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002137 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002138
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002139 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002140 adapter->msix_entries[i].entry = i;
2141
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002142 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002143 if (status == 0) {
2144 goto done;
2145 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002146 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002147 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002148 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002149 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002150 }
2151 return;
2152done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002153 adapter->num_msix_vec = num_vec;
2154 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002155}
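
/*
 * Worked example (a sketch): if be_num_rxqs_want() returns 8, num_vec
 * starts at 9. On a platform granting only 4 vectors the first
 * pci_enable_msix() call returns 4; the retry with num_vec = 4
 * succeeds, leaving one vector for TX/MCC and three for RX
 * (be_rx_queues_create() clamps num_rx_qs to num_msix_vec - 1).
 */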
2156
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002157static void be_sriov_enable(struct be_adapter *adapter)
2158{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002159 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002160#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002161 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002162 int status, pos;
2163 u16 nvfs;
2164
2165 pos = pci_find_ext_capability(adapter->pdev,
2166 PCI_EXT_CAP_ID_SRIOV);
2167 pci_read_config_word(adapter->pdev,
2168 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2169
2170 if (num_vfs > nvfs) {
2171 dev_info(&adapter->pdev->dev,
2172 "Device supports %d VFs and not %d\n",
2173 nvfs, num_vfs);
2174 num_vfs = nvfs;
2175 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002176
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002177 status = pci_enable_sriov(adapter->pdev, num_vfs);
2178 adapter->sriov_enabled = status ? false : true;
2179 }
2180#endif
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002181}
2182
2183static void be_sriov_disable(struct be_adapter *adapter)
2184{
2185#ifdef CONFIG_PCI_IOV
2186 if (adapter->sriov_enabled) {
2187 pci_disable_sriov(adapter->pdev);
2188 adapter->sriov_enabled = false;
2189 }
2190#endif
2191}
2192
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002193static inline int be_msix_vec_get(struct be_adapter *adapter,
2194 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002195{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002196 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002197}
2198
2199static int be_request_irq(struct be_adapter *adapter,
2200 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002201 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002202{
2203 struct net_device *netdev = adapter->netdev;
2204 int vec;
2205
2206 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002207 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002208 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002209}
2210
Sathya Perla3abcded2010-10-03 22:12:27 -07002211static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2212 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002213{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002214 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002215 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216}
2217
2218static int be_msix_register(struct be_adapter *adapter)
2219{
Sathya Perla3abcded2010-10-03 22:12:27 -07002220 struct be_rx_obj *rxo;
2221 int status, i;
2222 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002223
Sathya Perla3abcded2010-10-03 22:12:27 -07002224 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2225 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226 if (status)
2227 goto err;
2228
Sathya Perla3abcded2010-10-03 22:12:27 -07002229 for_all_rx_queues(adapter, rxo, i) {
2230 sprintf(qname, "rxq%d", i);
2231 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2232 qname, rxo);
2233 if (status)
2234 goto err_msix;
2235 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002236
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002237 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002238
Sathya Perla3abcded2010-10-03 22:12:27 -07002239err_msix:
2240 be_free_irq(adapter, &adapter->tx_eq, adapter);
2241
2242 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2243 be_free_irq(adapter, &rxo->rx_eq, rxo);
2244
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002245err:
2246 dev_warn(&adapter->pdev->dev,
2247 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002248 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249 return status;
2250}
2251
2252static int be_irq_register(struct be_adapter *adapter)
2253{
2254 struct net_device *netdev = adapter->netdev;
2255 int status;
2256
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002257 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258 status = be_msix_register(adapter);
2259 if (status == 0)
2260 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002261 /* INTx is not supported for VF */
2262 if (!be_physfn(adapter))
2263 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002264 }
2265
2266 /* INTx */
2267 netdev->irq = adapter->pdev->irq;
2268 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2269 adapter);
2270 if (status) {
2271 dev_err(&adapter->pdev->dev,
2272 "INTx request IRQ failed - err %d\n", status);
2273 return status;
2274 }
2275done:
2276 adapter->isr_registered = true;
2277 return 0;
2278}
2279
2280static void be_irq_unregister(struct be_adapter *adapter)
2281{
2282 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002283 struct be_rx_obj *rxo;
2284 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285
2286 if (!adapter->isr_registered)
2287 return;
2288
2289 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002290 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002291 free_irq(netdev->irq, adapter);
2292 goto done;
2293 }
2294
2295 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002296 be_free_irq(adapter, &adapter->tx_eq, adapter);
2297
2298 for_all_rx_queues(adapter, rxo, i)
2299 be_free_irq(adapter, &rxo->rx_eq, rxo);
2300
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002301done:
2302 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002303}
2304
Sathya Perla889cd4b2010-05-30 23:33:45 +00002305static int be_close(struct net_device *netdev)
2306{
2307 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002308 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002309 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002310 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002311 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002312
Sathya Perla889cd4b2010-05-30 23:33:45 +00002313 be_async_mcc_disable(adapter);
2314
Sathya Perla889cd4b2010-05-30 23:33:45 +00002315 netif_carrier_off(netdev);
2316 adapter->link_up = false;
2317
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002318 if (!lancer_chip(adapter))
2319 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002320
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002321 for_all_rx_queues(adapter, rxo, i)
2322 napi_disable(&rxo->rx_eq.napi);
2323
2324 napi_disable(&tx_eq->napi);
2325
2326 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002327 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2328 for_all_rx_queues(adapter, rxo, i)
2329 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002330 for_all_tx_queues(adapter, txo, i)
2331 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002332 }
2333
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002334 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002335 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002336 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002337
2338 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002339 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002340 synchronize_irq(vec);
2341 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002342 } else {
2343 synchronize_irq(netdev->irq);
2344 }
2345 be_irq_unregister(adapter);
2346
Sathya Perla889cd4b2010-05-30 23:33:45 +00002347 /* Wait for all pending tx completions to arrive so that
2348 * all tx skbs are freed.
2349 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002350 for_all_tx_queues(adapter, txo, i)
2351 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002352
2353 return 0;
2354}
2355
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002356static int be_open(struct net_device *netdev)
2357{
2358 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002360 struct be_rx_obj *rxo;
Sathya Perlaa8f447b2009-06-18 00:10:27 +00002361 bool link_up;
Sathya Perla3abcded2010-10-03 22:12:27 -07002362 int status, i;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002363 u8 mac_speed;
2364 u16 link_speed;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002365
Sathya Perla3abcded2010-10-03 22:12:27 -07002366 for_all_rx_queues(adapter, rxo, i) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002367 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002368 napi_enable(&rxo->rx_eq.napi);
2369 }
Sathya Perla5fb379e2009-06-18 00:02:59 +00002370 napi_enable(&tx_eq->napi);
2371
2372 be_irq_register(adapter);
2373
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002374 if (!lancer_chip(adapter))
2375 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002376
2377 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002378 for_all_rx_queues(adapter, rxo, i) {
2379 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2380 be_cq_notify(adapter, rxo->cq.id, true, 0);
2381 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002382 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002383
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002384 /* Now that interrupts are on we can process async mcc */
2385 be_async_mcc_enable(adapter);
2386
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07002387 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
Ajit Khaparde187e8752011-04-19 12:11:46 +00002388 &link_speed, 0);
Sathya Perlaa8f447b2009-06-18 00:10:27 +00002389 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002390 goto err;
Sathya Perlaa8f447b2009-06-18 00:10:27 +00002391 be_link_status_update(adapter, link_up);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002392
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002393 if (be_physfn(adapter)) {
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002394 status = be_vid_config(adapter, false, 0);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002395 if (status)
2396 goto err;
2397
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002398 status = be_cmd_set_flow_control(adapter,
2399 adapter->tx_fc, adapter->rx_fc);
2400 if (status)
Sathya Perla889cd4b2010-05-30 23:33:45 +00002401 goto err;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002402 }
Ajit Khaparde4f2aa892009-11-06 02:07:32 +00002403
Sathya Perla889cd4b2010-05-30 23:33:45 +00002404 return 0;
2405err:
2406 be_close(adapter->netdev);
2407 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002408}
2409
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002410static int be_setup_wol(struct be_adapter *adapter, bool enable)
2411{
2412 struct be_dma_mem cmd;
2413 int status = 0;
2414 u8 mac[ETH_ALEN];
2415
2416 memset(mac, 0, ETH_ALEN);
2417
2418 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002419 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2420 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002421 if (cmd.va == NULL)
2422 return -1;
2423 memset(cmd.va, 0, cmd.size);
2424
2425 if (enable) {
2426 status = pci_write_config_dword(adapter->pdev,
2427 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2428 if (status) {
2429 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002430 "Could not enable Wake-on-LAN\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002431 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2432 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002433 return status;
2434 }
2435 status = be_cmd_enable_magic_wol(adapter,
2436 adapter->netdev->dev_addr, &cmd);
2437 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2438 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2439 } else {
2440 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2441 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2442 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2443 }
2444
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002445 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002446 return status;
2447}
2448
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed. These addresses are programmed in the ASIC by the PF and
 * the VF driver queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					 adapter->vf_cfg[vf].vf_if_handle,
					 &adapter->vf_cfg[vf].vf_pmac_id,
					 vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

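/* Create the interface(s) and queues needed for basic operation: an
 * if_create for the PF (plus one per VF when SR-IOV is enabled),
 * followed by the TX, RX and MCC queues. On failure everything is
 * unwound in reverse order through the labels at the bottom.
 */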
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

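/* The last 4 bytes of a flash section hold its CRC. Compare the CRC
 * embedded in the new redboot image against the CRC read back from
 * flash; reflash redboot only when they differ.
 */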
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

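/* Walk the per-generation flash layout table and write each UFI
 * component to its flash region in 32KB chunks: intermediate chunks
 * are staged with FLASHROM_OPER_SAVE and the final chunk is committed
 * with FLASHROM_OPER_FLASH.
 */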
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

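/* Lancer firmware download: stream the image to the "/prg" object in
 * LANCER_FW_DOWNLOAD_CHUNK pieces, advancing by however many bytes the
 * firmware reports as written, then issue a zero-length write to
 * commit the image.
 */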
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

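/* BE2/BE3 firmware download: the UFI file header declares which ASIC
 * generation the image targets, so flash only when it matches this
 * adapter's generation. Gen3 UFIs carry multiple image headers; only
 * imageid 1 is flashed.
 */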
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

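/* Entry point for user-initiated flashing (reached via ethtool's
 * flash-device path); flashing is refused while the interface is down.
 */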
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

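/* hw_features advertises what ethtool may toggle at runtime; features
 * is what is currently enabled. VLAN RX acceleration and filtering are
 * enabled but not listed in hw_features, so they stay always-on.
 */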
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

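/* BAR layout differs by chip. Lancer needs only its doorbell BAR (0).
 * On BE2/BE3 the PF maps CSR (BAR 2), doorbells (BAR 4) and the pcicfg
 * BAR (1 on gen2, 0 on gen3); a gen3 VF uses BAR 0 for doorbells and
 * reaches pcicfg at a fixed offset within that mapping.
 */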
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

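/* Map the BARs and allocate the control-path DMA memory: the bootstrap
 * mailbox (over-allocated by 16 bytes so the va/dma handed to hardware
 * can be 16-byte aligned via PTR_ALIGN) and the multicast cmd buffer.
 */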
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

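/* Device teardown: undo be_probe() in reverse order. */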
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

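/* Read the adapter's identity and limits from firmware: FW version,
 * port number, function mode/capabilities and the permanent MAC
 * address; then derive the VLAN table share and TX queue count from
 * the reported mode.
 */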
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* With function_mode bit 0x400 set, only a quarter of the VLAN
	 * filter table is available to this function.
	 */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

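/* Derive the ASIC generation from the PCI device id; for OC_DEVICE_ID3/4
 * (Lancer family) the SLI_INTF register must also report a valid
 * interface type.
 */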
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

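/* Poll the SLIPORT status register for the ready bit: 500 iterations
 * with msleep(20) bounds the wait at roughly 10 seconds.
 */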
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

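/* If the SLI port reports an error that the RN (reset needed) bit says
 * is recoverable, kick a port reset through SLIPORT_CONTROL and wait
 * for the port to come back ready.
 */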
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

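/* PCI probe: enable the device, map BARs, sync with firmware (POST),
 * reset the function and read its config, then be_setup() the rings
 * before registering the netdev and kicking off the worker.
 */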
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	/* By default all priorities are enabled.
	 * Needed in case of no GRP5 evt support
	 */
	adapter->vlan_prio_bmap = 0xff;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

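/* PM suspend: arm WoL if requested, then release all rings and HW
 * resources so be_resume() can rebuild them from scratch via be_setup().
 */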
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

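/* EEH (PCI error) recovery: on detection the device is quiesced and
 * cleared; slot_reset re-enables it and waits for POST; resume redoes
 * FW init and be_setup() and reopens the netdev.
 */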
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);