Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000018#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000019#include <linux/if_vlan.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000021#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070022#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000023#include <linux/firmware.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000024#include "bnx2x_cmn.h"
25
Dmitry Kravkov523224a2010-10-06 03:23:26 +000026#include "bnx2x_init.h"
27
stephen hemminger8d962862010-10-21 07:50:56 +000028static int bnx2x_setup_irqs(struct bnx2x *bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000029
30/* free skb in the packet ring at pos idx
31 * return idx of last bd freed
32 */
33static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34 u16 idx)
35{
36 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37 struct eth_tx_start_bd *tx_start_bd;
38 struct eth_tx_bd *tx_data_bd;
39 struct sk_buff *skb = tx_buf->skb;
40 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41 int nbd;
42
43 /* prefetch skb end pointer to speed up dev_kfree_skb() */
44 prefetch(&skb->end);
45
46 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
47 idx, tx_buf, skb);
48
49 /* unmap first bd */
50 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +000053 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000054
55 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56#ifdef BNX2X_STOP_ON_ERROR
57 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58 BNX2X_ERR("BAD nbd!\n");
59 bnx2x_panic();
60 }
61#endif
62 new_cons = nbd + tx_buf->first_bd;
63
64 /* Get the next bd */
65 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67 /* Skip a parse bd... */
68 --nbd;
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71 /* ...and the TSO split header bd since they have no mapping */
72 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73 --nbd;
74 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75 }
76
77 /* now free frags */
78 while (nbd > 0) {
79
80 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84 if (--nbd)
85 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86 }
87
88 /* release skb */
89 WARN_ON(!skb);
90 dev_kfree_skb(skb);
91 tx_buf->first_bd = 0;
92 tx_buf->skb = NULL;
93
94 return new_cons;
95}
96
97int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98{
99 struct bnx2x *bp = fp->bp;
100 struct netdev_queue *txq;
101 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103#ifdef BNX2X_STOP_ON_ERROR
104 if (unlikely(bp->panic))
105 return -1;
106#endif
107
108 txq = netdev_get_tx_queue(bp->dev, fp->index);
109 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110 sw_cons = fp->tx_pkt_cons;
111
112 while (sw_cons != hw_cons) {
113 u16 pkt_cons;
114
115 pkt_cons = TX_BD(sw_cons);
116
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000117 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
118 " pkt_cons %u\n",
119 fp->index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000120
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000121 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
122 sw_cons++;
123 }
124
125 fp->tx_pkt_cons = sw_cons;
126 fp->tx_bd_cons = bd_cons;
127
128 /* Need to make the tx_bd_cons update visible to start_xmit()
129 * before checking for netif_tx_queue_stopped(). Without the
130 * memory barrier, there is a small possibility that
131 * start_xmit() will miss it and cause the queue to be stopped
132 * forever.
133 */
134 smp_mb();
135
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000136 if (unlikely(netif_tx_queue_stopped(txq))) {
137 /* Taking tx_lock() is needed to prevent reenabling the queue
138 * while it's empty. This could have happened if rx_action() gets
139 * suspended in bnx2x_tx_int() after the condition before
140 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
141 *
142 * stops the queue->sees fresh tx_bd_cons->releases the queue->
143 * sends some packets consuming the whole queue again->
144 * stops the queue
145 */
146
147 __netif_tx_lock(txq, smp_processor_id());
148
149 if ((netif_tx_queue_stopped(txq)) &&
150 (bp->state == BNX2X_STATE_OPEN) &&
151 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
152 netif_tx_wake_queue(txq);
153
154 __netif_tx_unlock(txq);
155 }
156 return 0;
157}
158
159static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
160 u16 idx)
161{
162 u16 last_max = fp->last_max_sge;
163
164 if (SUB_S16(idx, last_max) > 0)
165 fp->last_max_sge = idx;
166}
167
168static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
169 struct eth_fast_path_rx_cqe *fp_cqe)
170{
171 struct bnx2x *bp = fp->bp;
172 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
173 le16_to_cpu(fp_cqe->len_on_bd)) >>
174 SGE_PAGE_SHIFT;
175 u16 last_max, last_elem, first_elem;
176 u16 delta = 0;
177 u16 i;
178
179 if (!sge_len)
180 return;
181
182 /* First mark all used pages */
183 for (i = 0; i < sge_len; i++)
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000184 SGE_MASK_CLEAR_BIT(fp,
185 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000186
187 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000188 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000189
190 /* Here we assume that the last SGE index is the biggest */
191 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000192 bnx2x_update_last_max_sge(fp,
193 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000194
195 last_max = RX_SGE(fp->last_max_sge);
196 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
197 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
198
199 /* If ring is not full */
200 if (last_elem + 1 != first_elem)
201 last_elem++;
202
203 /* Now update the prod */
204 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
205 if (likely(fp->sge_mask[i]))
206 break;
207
208 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
209 delta += RX_SGE_MASK_ELEM_SZ;
210 }
211
212 if (delta > 0) {
213 fp->rx_sge_prod += delta;
214 /* clear page-end entries */
215 bnx2x_clear_sge_mask_next_elems(fp);
216 }
217
218 DP(NETIF_MSG_RX_STATUS,
219 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
220 fp->last_max_sge, fp->rx_sge_prod);
221}
222
223static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
224 struct sk_buff *skb, u16 cons, u16 prod)
225{
226 struct bnx2x *bp = fp->bp;
227 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
228 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
229 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
230 dma_addr_t mapping;
231
232 /* move empty skb from pool to prod and map it */
233 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
235 bp->rx_buf_size, DMA_FROM_DEVICE);
236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237
238 /* move partial skb from cons to pool (don't unmap yet) */
239 fp->tpa_pool[queue] = *cons_rx_buf;
240
241 /* mark bin state as start - print error if current state != stop */
242 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
243 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
244
245 fp->tpa_state[queue] = BNX2X_TPA_START;
246
247 /* point prod_bd to new skb */
248 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
249 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
250
251#ifdef BNX2X_STOP_ON_ERROR
252 fp->tpa_queue_used |= (1 << queue);
253#ifdef _ASM_GENERIC_INT_L64_H
254 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
255#else
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
257#endif
258 fp->tpa_queue_used);
259#endif
260}
261
262static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
263 struct sk_buff *skb,
264 struct eth_fast_path_rx_cqe *fp_cqe,
265 u16 cqe_idx)
266{
267 struct sw_rx_page *rx_pg, old_rx_pg;
268 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
269 u32 i, frag_len, frag_size, pages;
270 int err;
271 int j;
272
273 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
274 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
275
276 /* This is needed in order to enable forwarding support */
277 if (frag_size)
278 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
279 max(frag_size, (u32)len_on_bd));
280
281#ifdef BNX2X_STOP_ON_ERROR
282 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
283 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
284 pages, cqe_idx);
285 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
286 fp_cqe->pkt_len, len_on_bd);
287 bnx2x_panic();
288 return -EINVAL;
289 }
290#endif
291
292 /* Run through the SGL and compose the fragmented skb */
293 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000294 u16 sge_idx =
295 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000296
297 /* FW gives the indices of the SGE as if the ring is an array
298 (meaning that "next" element will consume 2 indices) */
299 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
300 rx_pg = &fp->rx_page_ring[sge_idx];
301 old_rx_pg = *rx_pg;
302
303 /* If we fail to allocate a substitute page, we simply stop
304 where we are and drop the whole packet */
305 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
306 if (unlikely(err)) {
307 fp->eth_q_stats.rx_skb_alloc_failed++;
308 return err;
309 }
310
311 /* Unmap the page as we're going to pass it to the stack */
312 dma_unmap_page(&bp->pdev->dev,
313 dma_unmap_addr(&old_rx_pg, mapping),
314 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
315
316 /* Add one frag and update the appropriate fields in the skb */
317 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
318
319 skb->data_len += frag_len;
320 skb->truesize += frag_len;
321 skb->len += frag_len;
322
323 frag_size -= frag_len;
324 }
325
326 return 0;
327}
328
329static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
330 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
331 u16 cqe_idx)
332{
333 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
334 struct sk_buff *skb = rx_buf->skb;
335 /* alloc new skb */
336 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
337
338 /* Unmap skb in the pool anyway, as we are going to change
339 pool entry status to BNX2X_TPA_STOP even if new skb allocation
340 fails. */
341 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
342 bp->rx_buf_size, DMA_FROM_DEVICE);
343
344 if (likely(new_skb)) {
345 /* fix ip xsum and give it to the stack */
346 /* (no need to map the new skb) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000347
348 prefetch(skb);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000349 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000350
351#ifdef BNX2X_STOP_ON_ERROR
352 if (pad + len > bp->rx_buf_size) {
353 BNX2X_ERR("skb_put is about to fail... "
354 "pad %d len %d rx_buf_size %d\n",
355 pad, len, bp->rx_buf_size);
356 bnx2x_panic();
357 return;
358 }
359#endif
360
361 skb_reserve(skb, pad);
362 skb_put(skb, len);
363
364 skb->protocol = eth_type_trans(skb, bp->dev);
365 skb->ip_summed = CHECKSUM_UNNECESSARY;
366
367 {
368 struct iphdr *iph;
369
370 iph = (struct iphdr *)skb->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000371 iph->check = 0;
372 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
373 }
374
375 if (!bnx2x_fill_frag_skb(bp, fp, skb,
376 &cqe->fast_path_cqe, cqe_idx)) {
Hao Zheng9bcc0892010-10-20 13:56:11 +0000377 if ((le16_to_cpu(cqe->fast_path_cqe.
378 pars_flags.flags) & PARSING_FLAGS_VLAN))
379 __vlan_hwaccel_put_tag(skb,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000380 le16_to_cpu(cqe->fast_path_cqe.
Hao Zheng9bcc0892010-10-20 13:56:11 +0000381 vlan_tag));
382 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000383 } else {
384 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
385 " - dropping packet!\n");
386 dev_kfree_skb(skb);
387 }
388
389
390 /* put new skb in bin */
391 fp->tpa_pool[queue].skb = new_skb;
392
393 } else {
394 /* else drop the packet and keep the buffer in the bin */
395 DP(NETIF_MSG_RX_STATUS,
396 "Failed to allocate new skb - dropping packet!\n");
397 fp->eth_q_stats.rx_skb_alloc_failed++;
398 }
399
400 fp->tpa_state[queue] = BNX2X_TPA_STOP;
401}
402
403/* Set Toeplitz hash value in the skb using the value from the
404 * CQE (calculated by HW).
405 */
406static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
407 struct sk_buff *skb)
408{
409 /* Set Toeplitz hash from CQE */
410 if ((bp->dev->features & NETIF_F_RXHASH) &&
411 (cqe->fast_path_cqe.status_flags &
412 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
413 skb->rxhash =
414 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
415}
416
417int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
418{
419 struct bnx2x *bp = fp->bp;
420 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
421 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
422 int rx_pkt = 0;
423
424#ifdef BNX2X_STOP_ON_ERROR
425 if (unlikely(bp->panic))
426 return 0;
427#endif
428
429 /* CQ "next element" is of the size of the regular element,
430 that's why it's ok here */
431 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
432 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
433 hw_comp_cons++;
434
435 bd_cons = fp->rx_bd_cons;
436 bd_prod = fp->rx_bd_prod;
437 bd_prod_fw = bd_prod;
438 sw_comp_cons = fp->rx_comp_cons;
439 sw_comp_prod = fp->rx_comp_prod;
440
441 /* Memory barrier necessary as speculative reads of the rx
442 * buffer can be ahead of the index in the status block
443 */
444 rmb();
445
446 DP(NETIF_MSG_RX_STATUS,
447 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
448 fp->index, hw_comp_cons, sw_comp_cons);
449
450 while (sw_comp_cons != hw_comp_cons) {
451 struct sw_rx_bd *rx_buf = NULL;
452 struct sk_buff *skb;
453 union eth_rx_cqe *cqe;
454 u8 cqe_fp_flags;
455 u16 len, pad;
456
457 comp_ring_cons = RCQ_BD(sw_comp_cons);
458 bd_prod = RX_BD(bd_prod);
459 bd_cons = RX_BD(bd_cons);
460
461 /* Prefetch the page containing the BD descriptor
462 at producer's index. It will be needed when a new skb is
463 allocated */
464 prefetch((void *)(PAGE_ALIGN((unsigned long)
465 (&fp->rx_desc_ring[bd_prod])) -
466 PAGE_SIZE + 1));
467
468 cqe = &fp->rx_comp_ring[comp_ring_cons];
469 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
470
471 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
472 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
473 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
474 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
475 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
476 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
477
478 /* is this a slowpath msg? */
479 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
480 bnx2x_sp_event(fp, cqe);
481 goto next_cqe;
482
483 /* this is an rx packet */
484 } else {
485 rx_buf = &fp->rx_buf_ring[bd_cons];
486 skb = rx_buf->skb;
487 prefetch(skb);
488 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
489 pad = cqe->fast_path_cqe.placement_offset;
490
Vladislav Zolotarovfe78d262010-10-17 23:02:20 +0000491 /* - If CQE is marked both TPA_START and TPA_END it is
492 * a non-TPA CQE.
493 * - FP CQE will always have either TPA_START or/and
494 * TPA_STOP flags set.
495 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000496 if ((!fp->disable_tpa) &&
497 (TPA_TYPE(cqe_fp_flags) !=
498 (TPA_TYPE_START | TPA_TYPE_END))) {
499 u16 queue = cqe->fast_path_cqe.queue_index;
500
501 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
502 DP(NETIF_MSG_RX_STATUS,
503 "calling tpa_start on queue %d\n",
504 queue);
505
506 bnx2x_tpa_start(fp, queue, skb,
507 bd_cons, bd_prod);
508
509 /* Set Toeplitz hash for an LRO skb */
510 bnx2x_set_skb_rxhash(bp, cqe, skb);
511
512 goto next_rx;
Vladislav Zolotarovfe78d262010-10-17 23:02:20 +0000513 } else { /* TPA_STOP */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000514 DP(NETIF_MSG_RX_STATUS,
515 "calling tpa_stop on queue %d\n",
516 queue);
517
518 if (!BNX2X_RX_SUM_FIX(cqe))
519 BNX2X_ERR("STOP on none TCP "
520 "data\n");
521
522 /* This is the size of the linear data
523 on this skb */
524 len = le16_to_cpu(cqe->fast_path_cqe.
525 len_on_bd);
526 bnx2x_tpa_stop(bp, fp, queue, pad,
527 len, cqe, comp_ring_cons);
528#ifdef BNX2X_STOP_ON_ERROR
529 if (bp->panic)
530 return 0;
531#endif
532
533 bnx2x_update_sge_prod(fp,
534 &cqe->fast_path_cqe);
535 goto next_cqe;
536 }
537 }
538
539 dma_sync_single_for_device(&bp->pdev->dev,
540 dma_unmap_addr(rx_buf, mapping),
541 pad + RX_COPY_THRESH,
542 DMA_FROM_DEVICE);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000543 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000544
545 /* is this an error packet? */
546 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
547 DP(NETIF_MSG_RX_ERR,
548 "ERROR flags %x rx packet %u\n",
549 cqe_fp_flags, sw_comp_cons);
550 fp->eth_q_stats.rx_err_discard_pkt++;
551 goto reuse_rx;
552 }
553
554 /* Since we don't have a jumbo ring
555 * copy small packets if mtu > 1500
556 */
557 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
558 (len <= RX_COPY_THRESH)) {
559 struct sk_buff *new_skb;
560
561 new_skb = netdev_alloc_skb(bp->dev,
562 len + pad);
563 if (new_skb == NULL) {
564 DP(NETIF_MSG_RX_ERR,
565 "ERROR packet dropped "
566 "because of alloc failure\n");
567 fp->eth_q_stats.rx_skb_alloc_failed++;
568 goto reuse_rx;
569 }
570
571 /* aligned copy */
572 skb_copy_from_linear_data_offset(skb, pad,
573 new_skb->data + pad, len);
574 skb_reserve(new_skb, pad);
575 skb_put(new_skb, len);
576
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000577 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000578
579 skb = new_skb;
580
581 } else
582 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
583 dma_unmap_single(&bp->pdev->dev,
584 dma_unmap_addr(rx_buf, mapping),
585 bp->rx_buf_size,
586 DMA_FROM_DEVICE);
587 skb_reserve(skb, pad);
588 skb_put(skb, len);
589
590 } else {
591 DP(NETIF_MSG_RX_ERR,
592 "ERROR packet dropped because "
593 "of alloc failure\n");
594 fp->eth_q_stats.rx_skb_alloc_failed++;
595reuse_rx:
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000596 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000597 goto next_rx;
598 }
599
600 skb->protocol = eth_type_trans(skb, bp->dev);
601
602 /* Set Toeplitz hash for a non-LRO skb */
603 bnx2x_set_skb_rxhash(bp, cqe, skb);
604
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700605 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000606
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000607 if (bp->rx_csum) {
608 if (likely(BNX2X_RX_CSUM_OK(cqe)))
609 skb->ip_summed = CHECKSUM_UNNECESSARY;
610 else
611 fp->eth_q_stats.hw_csum_err++;
612 }
613 }
614
615 skb_record_rx_queue(skb, fp->index);
616
Hao Zheng9bcc0892010-10-20 13:56:11 +0000617 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
618 PARSING_FLAGS_VLAN)
619 __vlan_hwaccel_put_tag(skb,
620 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
621 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000622
623
624next_rx:
625 rx_buf->skb = NULL;
626
627 bd_cons = NEXT_RX_IDX(bd_cons);
628 bd_prod = NEXT_RX_IDX(bd_prod);
629 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
630 rx_pkt++;
631next_cqe:
632 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
633 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
634
635 if (rx_pkt == budget)
636 break;
637 } /* while */
638
639 fp->rx_bd_cons = bd_cons;
640 fp->rx_bd_prod = bd_prod_fw;
641 fp->rx_comp_cons = sw_comp_cons;
642 fp->rx_comp_prod = sw_comp_prod;
643
644 /* Update producers */
645 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
646 fp->rx_sge_prod);
647
648 fp->rx_pkt += rx_pkt;
649 fp->rx_calls++;
650
651 return rx_pkt;
652}
653
654static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
655{
656 struct bnx2x_fastpath *fp = fp_cookie;
657 struct bnx2x *bp = fp->bp;
658
659 /* Return here if interrupt is disabled */
660 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
661 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
662 return IRQ_HANDLED;
663 }
664
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000665 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
666 "[fp %d fw_sd %d igusb %d]\n",
667 fp->index, fp->fw_sb_id, fp->igu_sb_id);
668 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000669
670#ifdef BNX2X_STOP_ON_ERROR
671 if (unlikely(bp->panic))
672 return IRQ_HANDLED;
673#endif
674
675 /* Handle Rx and Tx according to MSI-X vector */
676 prefetch(fp->rx_cons_sb);
677 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000678 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000679 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
680
681 return IRQ_HANDLED;
682}
683
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000684/* HW Lock for shared dual port PHYs */
685void bnx2x_acquire_phy_lock(struct bnx2x *bp)
686{
687 mutex_lock(&bp->port.phy_mutex);
688
689 if (bp->port.need_hw_lock)
690 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
691}
692
693void bnx2x_release_phy_lock(struct bnx2x *bp)
694{
695 if (bp->port.need_hw_lock)
696 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
697
698 mutex_unlock(&bp->port.phy_mutex);
699}
700
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800701/* calculates MF speed according to current linespeed and MF configuration */
702u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{
704 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000706 u16 maxCfg = bnx2x_extract_max_cfg(bp,
707 bp->mf_config[BP_VN(bp)]);
708
709 /* Calculate the current MAX line speed limit for the MF
710 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800711 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000712 if (IS_MF_SI(bp))
713 line_speed = (line_speed * maxCfg) / 100;
714 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800715 u16 vn_max_rate = maxCfg * 100;
716
717 if (vn_max_rate < line_speed)
718 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000719 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800720 }
721
722 return line_speed;
723}
724
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000725void bnx2x_link_report(struct bnx2x *bp)
726{
727 if (bp->flags & MF_FUNC_DIS) {
728 netif_carrier_off(bp->dev);
729 netdev_err(bp->dev, "NIC Link is Down\n");
730 return;
731 }
732
733 if (bp->link_vars.link_up) {
734 u16 line_speed;
735
736 if (bp->state == BNX2X_STATE_OPEN)
737 netif_carrier_on(bp->dev);
738 netdev_info(bp->dev, "NIC Link is Up, ");
739
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800740 line_speed = bnx2x_get_mf_speed(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000741
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000742 pr_cont("%d Mbps ", line_speed);
743
744 if (bp->link_vars.duplex == DUPLEX_FULL)
745 pr_cont("full duplex");
746 else
747 pr_cont("half duplex");
748
749 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
750 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
751 pr_cont(", receive ");
752 if (bp->link_vars.flow_ctrl &
753 BNX2X_FLOW_CTRL_TX)
754 pr_cont("& transmit ");
755 } else {
756 pr_cont(", transmit ");
757 }
758 pr_cont("flow control ON");
759 }
760 pr_cont("\n");
761
762 } else { /* link_down */
763 netif_carrier_off(bp->dev);
764 netdev_err(bp->dev, "NIC Link is Down\n");
765 }
766}
767
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000768/* Returns the number of actually allocated BDs */
769static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
770 int rx_ring_size)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000771{
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000772 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000773 u16 ring_prod, cqe_ring_prod;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000774 int i;
775
776 fp->rx_comp_cons = 0;
777 cqe_ring_prod = ring_prod = 0;
778 for (i = 0; i < rx_ring_size; i++) {
779 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
780 BNX2X_ERR("was only able to allocate "
781 "%d rx skbs on queue[%d]\n", i, fp->index);
782 fp->eth_q_stats.rx_skb_alloc_failed++;
783 break;
784 }
785 ring_prod = NEXT_RX_IDX(ring_prod);
786 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
787 WARN_ON(ring_prod <= i);
788 }
789
790 fp->rx_bd_prod = ring_prod;
791 /* Limit the CQE producer by the CQE ring size */
792 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
793 cqe_ring_prod);
794 fp->rx_pkt = fp->rx_calls = 0;
795
796 return i;
797}
798
799static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
800{
801 struct bnx2x *bp = fp->bp;
Dmitry Kravkov25141582010-09-12 05:48:28 +0000802 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
803 MAX_RX_AVAIL/bp->num_queues;
804
805 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000806
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000807 bnx2x_alloc_rx_bds(fp, rx_ring_size);
808
809 /* Warning!
810 * this will generate an interrupt (to the TSTORM)
811 * must only be done after chip is initialized
812 */
813 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
814 fp->rx_sge_prod);
815}
816
817void bnx2x_init_rx_rings(struct bnx2x *bp)
818{
819 int func = BP_FUNC(bp);
820 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
821 ETH_MAX_AGGREGATION_QUEUES_E1H;
822 u16 ring_prod;
823 int i, j;
824
825 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
Dmitry Kravkovc8e4f482010-10-17 23:09:30 +0000826 IP_HEADER_ALIGNMENT_PADDING;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000827
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000828 DP(NETIF_MSG_IFUP,
829 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
830
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000831 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000832 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000833
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000834 if (!fp->disable_tpa) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000835 for (i = 0; i < max_agg_queues; i++) {
836 fp->tpa_pool[i].skb =
837 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
838 if (!fp->tpa_pool[i].skb) {
839 BNX2X_ERR("Failed to allocate TPA "
840 "skb pool for queue[%d] - "
841 "disabling TPA on this "
842 "queue!\n", j);
843 bnx2x_free_tpa_pool(bp, fp, i);
844 fp->disable_tpa = 1;
845 break;
846 }
847 dma_unmap_addr_set((struct sw_rx_bd *)
848 &bp->fp->tpa_pool[i],
849 mapping, 0);
850 fp->tpa_state[i] = BNX2X_TPA_STOP;
851 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000852
853 /* "next page" elements initialization */
854 bnx2x_set_next_page_sgl(fp);
855
856 /* set SGEs bit mask */
857 bnx2x_init_sge_ring_bit_mask(fp);
858
859 /* Allocate SGEs and initialize the ring elements */
860 for (i = 0, ring_prod = 0;
861 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
862
863 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
864 BNX2X_ERR("was only able to allocate "
865 "%d rx sges\n", i);
866 BNX2X_ERR("disabling TPA for"
867 " queue[%d]\n", j);
868 /* Cleanup already allocated elements */
869 bnx2x_free_rx_sge_range(bp,
870 fp, ring_prod);
871 bnx2x_free_tpa_pool(bp,
872 fp, max_agg_queues);
873 fp->disable_tpa = 1;
874 ring_prod = 0;
875 break;
876 }
877 ring_prod = NEXT_SGE_IDX(ring_prod);
878 }
879
880 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000881 }
882 }
883
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000884 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000885 struct bnx2x_fastpath *fp = &bp->fp[j];
886
887 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000888
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000889 bnx2x_set_next_page_rx_bd(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000890
891 /* CQ ring */
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000892 bnx2x_set_next_page_rx_cq(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000893
894 /* Allocate BDs and initialize BD ring */
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000895 bnx2x_alloc_rx_bd_ring(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000896
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000897 if (j != 0)
898 continue;
899
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000900 if (!CHIP_IS_E2(bp)) {
901 REG_WR(bp, BAR_USTRORM_INTMEM +
902 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
903 U64_LO(fp->rx_comp_mapping));
904 REG_WR(bp, BAR_USTRORM_INTMEM +
905 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
906 U64_HI(fp->rx_comp_mapping));
907 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000908 }
909}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000910
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000911static void bnx2x_free_tx_skbs(struct bnx2x *bp)
912{
913 int i;
914
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000915 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000916 struct bnx2x_fastpath *fp = &bp->fp[i];
917
918 u16 bd_cons = fp->tx_bd_cons;
919 u16 sw_prod = fp->tx_pkt_prod;
920 u16 sw_cons = fp->tx_pkt_cons;
921
922 while (sw_cons != sw_prod) {
923 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
924 sw_cons++;
925 }
926 }
927}
928
929static void bnx2x_free_rx_skbs(struct bnx2x *bp)
930{
931 int i, j;
932
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000933 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000934 struct bnx2x_fastpath *fp = &bp->fp[j];
935
936 for (i = 0; i < NUM_RX_BD; i++) {
937 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
938 struct sk_buff *skb = rx_buf->skb;
939
940 if (skb == NULL)
941 continue;
942
943 dma_unmap_single(&bp->pdev->dev,
944 dma_unmap_addr(rx_buf, mapping),
945 bp->rx_buf_size, DMA_FROM_DEVICE);
946
947 rx_buf->skb = NULL;
948 dev_kfree_skb(skb);
949 }
950 if (!fp->disable_tpa)
951 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
952 ETH_MAX_AGGREGATION_QUEUES_E1 :
953 ETH_MAX_AGGREGATION_QUEUES_E1H);
954 }
955}
956
957void bnx2x_free_skbs(struct bnx2x *bp)
958{
959 bnx2x_free_tx_skbs(bp);
960 bnx2x_free_rx_skbs(bp);
961}
962
963static void bnx2x_free_msix_irqs(struct bnx2x *bp)
964{
965 int i, offset = 1;
966
967 free_irq(bp->msix_table[0].vector, bp->dev);
968 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
969 bp->msix_table[0].vector);
970
971#ifdef BCM_CNIC
972 offset++;
973#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000974 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000975 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
976 "state %x\n", i, bp->msix_table[i + offset].vector,
977 bnx2x_fp(bp, i, state));
978
979 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
980 }
981}
982
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000983void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000984{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000985 if (bp->flags & USING_MSIX_FLAG)
986 bnx2x_free_msix_irqs(bp);
987 else if (bp->flags & USING_MSI_FLAG)
988 free_irq(bp->pdev->irq, bp->dev);
989 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000990 free_irq(bp->pdev->irq, bp->dev);
991}
992
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000993int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000994{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000995 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000996
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000997 bp->msix_table[msix_vec].entry = msix_vec;
998 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
999 bp->msix_table[0].entry);
1000 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001001
1002#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001003 bp->msix_table[msix_vec].entry = msix_vec;
1004 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1005 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1006 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001007#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001008 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001009 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001010 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001011 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1012 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001013 }
1014
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001015 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001016
1017 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001018
1019 /*
1020 * reconfigure number of tx/rx queues according to available
1021 * MSI-X vectors
1022 */
1023 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001024 /* how many fewer vectors will we have? */
1025 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001026
1027 DP(NETIF_MSG_IFUP,
1028 "Trying to use less MSI-X vectors: %d\n", rc);
1029
1030 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1031
1032 if (rc) {
1033 DP(NETIF_MSG_IFUP,
1034 "MSI-X is not attainable rc %d\n", rc);
1035 return rc;
1036 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001037 /*
1038 * decrease number of queues by number of unallocated entries
1039 */
1040 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001041
1042 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1043 bp->num_queues);
1044 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001045 /* fall back to INTx if not enough memory */
1046 if (rc == -ENOMEM)
1047 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001048 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1049 return rc;
1050 }
1051
1052 bp->flags |= USING_MSIX_FLAG;
1053
1054 return 0;
1055}
1056
1057static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1058{
1059 int i, rc, offset = 1;
1060
1061 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1062 bp->dev->name, bp->dev);
1063 if (rc) {
1064 BNX2X_ERR("request sp irq failed\n");
1065 return -EBUSY;
1066 }
1067
1068#ifdef BCM_CNIC
1069 offset++;
1070#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001071 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001072 struct bnx2x_fastpath *fp = &bp->fp[i];
1073 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1074 bp->dev->name, i);
1075
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001076 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001077 bnx2x_msix_fp_int, 0, fp->name, fp);
1078 if (rc) {
1079 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1080 bnx2x_free_msix_irqs(bp);
1081 return -EBUSY;
1082 }
1083
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001084 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001085 fp->state = BNX2X_FP_STATE_IRQ;
1086 }
1087
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001088 i = BNX2X_NUM_ETH_QUEUES(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001089 offset = 1 + CNIC_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001090 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1091 " ... fp[%d] %d\n",
1092 bp->msix_table[0].vector,
1093 0, bp->msix_table[offset].vector,
1094 i - 1, bp->msix_table[offset + i - 1].vector);
1095
1096 return 0;
1097}
1098
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001099int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001100{
1101 int rc;
1102
1103 rc = pci_enable_msi(bp->pdev);
1104 if (rc) {
1105 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1106 return -1;
1107 }
1108 bp->flags |= USING_MSI_FLAG;
1109
1110 return 0;
1111}
1112
1113static int bnx2x_req_irq(struct bnx2x *bp)
1114{
1115 unsigned long flags;
1116 int rc;
1117
1118 if (bp->flags & USING_MSI_FLAG)
1119 flags = 0;
1120 else
1121 flags = IRQF_SHARED;
1122
1123 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1124 bp->dev->name, bp->dev);
1125 if (!rc)
1126 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1127
1128 return rc;
1129}
1130
1131static void bnx2x_napi_enable(struct bnx2x *bp)
1132{
1133 int i;
1134
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001135 for_each_napi_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001136 napi_enable(&bnx2x_fp(bp, i, napi));
1137}
1138
1139static void bnx2x_napi_disable(struct bnx2x *bp)
1140{
1141 int i;
1142
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001143 for_each_napi_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001144 napi_disable(&bnx2x_fp(bp, i, napi));
1145}
1146
1147void bnx2x_netif_start(struct bnx2x *bp)
1148{
1149 int intr_sem;
1150
1151 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1152 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1153
1154 if (intr_sem) {
1155 if (netif_running(bp->dev)) {
1156 bnx2x_napi_enable(bp);
1157 bnx2x_int_enable(bp);
1158 if (bp->state == BNX2X_STATE_OPEN)
1159 netif_tx_wake_all_queues(bp->dev);
1160 }
1161 }
1162}
1163
1164void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1165{
1166 bnx2x_int_disable_sync(bp, disable_hw);
1167 bnx2x_napi_disable(bp);
1168 netif_tx_disable(bp->dev);
1169}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001170
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001171u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1172{
1173#ifdef BCM_CNIC
1174 struct bnx2x *bp = netdev_priv(dev);
1175 if (NO_FCOE(bp))
1176 return skb_tx_hash(dev, skb);
1177 else {
1178 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1179 u16 ether_type = ntohs(hdr->h_proto);
1180
1181 /* Skip VLAN tag if present */
1182 if (ether_type == ETH_P_8021Q) {
1183 struct vlan_ethhdr *vhdr =
1184 (struct vlan_ethhdr *)skb->data;
1185
1186 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1187 }
1188
1189 /* If ethertype is FCoE or FIP - use FCoE ring */
1190 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1191 return bnx2x_fcoe(bp, index);
1192 }
1193#endif
1194 /* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1195 */
1196 return __skb_tx_hash(dev, skb,
1197 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1198}
1199
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001200void bnx2x_set_num_queues(struct bnx2x *bp)
1201{
1202 switch (bp->multi_mode) {
1203 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001204 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001205 break;
1206 case ETH_RSS_MODE_REGULAR:
1207 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001208 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001209
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001210 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001211 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001212 break;
1213 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001214
1215 /* Add special queues */
1216 bp->num_queues += NONE_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001217}
1218
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001219#ifdef BCM_CNIC
1220static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1221{
1222 if (!NO_FCOE(bp)) {
1223 if (!IS_MF_SD(bp))
1224 bnx2x_set_fip_eth_mac_addr(bp, 1);
1225 bnx2x_set_all_enode_macs(bp, 1);
1226 bp->flags |= FCOE_MACS_SET;
1227 }
1228}
1229#endif
1230
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001231static void bnx2x_release_firmware(struct bnx2x *bp)
1232{
1233 kfree(bp->init_ops_offsets);
1234 kfree(bp->init_ops);
1235 kfree(bp->init_data);
1236 release_firmware(bp->firmware);
1237}
1238
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001239static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1240{
1241 int rc, num = bp->num_queues;
1242
1243#ifdef BCM_CNIC
1244 if (NO_FCOE(bp))
1245 num -= FCOE_CONTEXT_USE;
1246
1247#endif
1248 netif_set_real_num_tx_queues(bp->dev, num);
1249 rc = netif_set_real_num_rx_queues(bp->dev, num);
1250 return rc;
1251}
1252
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001253/* must be called with rtnl_lock */
1254int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1255{
1256 u32 load_code;
1257 int i, rc;
1258
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001259 /* Set init arrays */
1260 rc = bnx2x_init_firmware(bp);
1261 if (rc) {
1262 BNX2X_ERR("Error loading firmware\n");
1263 return rc;
1264 }
1265
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001266#ifdef BNX2X_STOP_ON_ERROR
1267 if (unlikely(bp->panic))
1268 return -EPERM;
1269#endif
1270
1271 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1272
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001273 /* must be called before memory allocation and HW init */
1274 bnx2x_ilt_set_info(bp);
1275
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001276 if (bnx2x_alloc_mem(bp))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001277 return -ENOMEM;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001278
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001279 rc = bnx2x_set_real_num_queues(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001280 if (rc) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001281 BNX2X_ERR("Unable to set real_num_queues\n");
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001282 goto load_error0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001283 }
1284
1285 for_each_queue(bp, i)
1286 bnx2x_fp(bp, i, disable_tpa) =
1287 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1288
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001289#ifdef BCM_CNIC
1290 /* We don't want TPA on FCoE L2 ring */
1291 bnx2x_fcoe(bp, disable_tpa) = 1;
1292#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001293 bnx2x_napi_enable(bp);
1294
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001295 /* Send LOAD_REQUEST command to MCP
1296 Returns the type of LOAD command:
1297 if it is the first port to be initialized
1298 common blocks should be initialized, otherwise - not
1299 */
1300 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001301 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001302 if (!load_code) {
1303 BNX2X_ERR("MCP response failure, aborting\n");
1304 rc = -EBUSY;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001305 goto load_error1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001306 }
1307 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1308 rc = -EBUSY; /* other port in diagnostic mode */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001309 goto load_error1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001310 }
1311
1312 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001313 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001314 int port = BP_PORT(bp);
1315
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001316 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1317 path, load_count[path][0], load_count[path][1],
1318 load_count[path][2]);
1319 load_count[path][0]++;
1320 load_count[path][1 + port]++;
1321 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1322 path, load_count[path][0], load_count[path][1],
1323 load_count[path][2]);
1324 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001325 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001326 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001327 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1328 else
1329 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1330 }
1331
1332 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001333 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001334 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1335 bp->port.pmf = 1;
1336 else
1337 bp->port.pmf = 0;
1338 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1339
1340 /* Initialize HW */
1341 rc = bnx2x_init_hw(bp, load_code);
1342 if (rc) {
1343 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001344 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001345 goto load_error2;
1346 }
1347
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001348 /* Connect to IRQs */
1349 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001350 if (rc) {
1351 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1352 goto load_error2;
1353 }
1354
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001355 /* Setup NIC internals and enable interrupts */
1356 bnx2x_nic_init(bp, load_code);
1357
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001358 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1359 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001360 (bp->common.shmem2_base))
1361 SHMEM2_WR(bp, dcc_support,
1362 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1363 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1364
1365 /* Send LOAD_DONE command to MCP */
1366 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001367 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001368 if (!load_code) {
1369 BNX2X_ERR("MCP response failure, aborting\n");
1370 rc = -EBUSY;
1371 goto load_error3;
1372 }
1373 }
1374
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00001375 bnx2x_dcbx_init(bp);
1376
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001377 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1378
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001379 rc = bnx2x_func_start(bp);
1380 if (rc) {
1381 BNX2X_ERR("Function start failed!\n");
1382#ifndef BNX2X_STOP_ON_ERROR
1383 goto load_error3;
1384#else
1385 bp->panic = 1;
1386 return -EBUSY;
1387#endif
1388 }
1389
1390 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001391 if (rc) {
1392 BNX2X_ERR("Setup leading failed!\n");
1393#ifndef BNX2X_STOP_ON_ERROR
1394 goto load_error3;
1395#else
1396 bp->panic = 1;
1397 return -EBUSY;
1398#endif
1399 }
1400
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001401 if (!CHIP_IS_E1(bp) &&
1402 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1403 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1404 bp->flags |= MF_FUNC_DIS;
1405 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001406
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001407#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001408 /* Enable Timer scan */
1409 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001410#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001411
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001412 for_each_nondefault_queue(bp, i) {
1413 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1414 if (rc)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001415#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001416 goto load_error4;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001417#else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001418 goto load_error3;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001419#endif
1420 }
1421
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001422 /* Now that Clients are configured we are ready to work */
1423 bp->state = BNX2X_STATE_OPEN;
1424
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001425#ifdef BCM_CNIC
1426 bnx2x_set_fcoe_eth_macs(bp);
1427#endif
1428
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001429 bnx2x_set_eth_mac(bp, 1);
1430
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001431 if (bp->port.pmf)
1432 bnx2x_initial_phy_init(bp, load_mode);
1433
1434 /* Start fast path */
1435 switch (load_mode) {
1436 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001437 /* Tx queue should only be re-enabled */
1438 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001439 /* Initialize the receive filter. */
1440 bnx2x_set_rx_mode(bp->dev);
1441 break;
1442
1443 case LOAD_OPEN:
1444 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001445 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001446 /* Initialize the receive filter. */
1447 bnx2x_set_rx_mode(bp->dev);
1448 break;
1449
1450 case LOAD_DIAG:
1451 /* Initialize the receive filter. */
1452 bnx2x_set_rx_mode(bp->dev);
1453 bp->state = BNX2X_STATE_DIAG;
1454 break;
1455
1456 default:
1457 break;
1458 }
1459
1460 if (!bp->port.pmf)
1461 bnx2x__link_status_update(bp);
1462
1463 /* start the timer */
1464 mod_timer(&bp->timer, jiffies + bp->current_interval);
1465
1466#ifdef BCM_CNIC
1467 bnx2x_setup_cnic_irq_info(bp);
1468 if (bp->state == BNX2X_STATE_OPEN)
1469 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1470#endif
1471 bnx2x_inc_load_cnt(bp);
1472
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001473 bnx2x_release_firmware(bp);
1474
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001475 return 0;
1476
1477#ifdef BCM_CNIC
1478load_error4:
1479 /* Disable Timer scan */
1480 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1481#endif
1482load_error3:
1483 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001484
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001485 /* Free SKBs, SGEs, TPA pool and driver internals */
1486 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001487 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001488 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001489
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001490 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001491 bnx2x_free_irq(bp);
1492load_error2:
1493 if (!BP_NOMCP(bp)) {
1494 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1495 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1496 }
1497
1498 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001499load_error1:
1500 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001501load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001502 bnx2x_free_mem(bp);
1503
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001504 bnx2x_release_firmware(bp);
1505
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001506 return rc;
1507}
1508
1509/* must be called with rtnl_lock */
1510int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1511{
1512 int i;
1513
1514 if (bp->state == BNX2X_STATE_CLOSED) {
1515 /* Interface has been removed - nothing to recover */
1516 bp->recovery_state = BNX2X_RECOVERY_DONE;
1517 bp->is_leader = 0;
1518 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1519 smp_wmb();
1520
1521 return -EINVAL;
1522 }
1523
1524#ifdef BCM_CNIC
1525 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1526#endif
1527 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1528
1529 /* Set "drop all" */
1530 bp->rx_mode = BNX2X_RX_MODE_NONE;
1531 bnx2x_set_storm_rx_mode(bp);
1532
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001533 /* Stop Tx */
1534 bnx2x_tx_disable(bp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001535
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001536 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001537
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001538 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001539 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001540
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001541 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001542
1543 /* Cleanup the chip if needed */
1544 if (unload_mode != UNLOAD_RECOVERY)
1545 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001546 else {
1547 /* Disable HW interrupts, NAPI and Tx */
1548 bnx2x_netif_stop(bp, 1);
1549
1550 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001551 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001552 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001553
1554 bp->port.pmf = 0;
1555
1556 /* Free SKBs, SGEs, TPA pool and driver internals */
1557 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001558 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001559 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001560
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001561 bnx2x_free_mem(bp);
1562
1563 bp->state = BNX2X_STATE_CLOSED;
1564
1565 /* The last driver must disable a "close the gate" if there is no
1566 * parity attention or "process kill" pending.
1567 */
1568 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1569 bnx2x_reset_is_done(bp))
1570 bnx2x_disable_close_the_gate(bp);
1571
1572 /* Reset MCP mailbox sequence if there is an ongoing recovery */
1573 if (unload_mode == UNLOAD_RECOVERY)
1574 bp->fw_seq = 0;
1575
1576 return 0;
1577}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001578
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001579int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1580{
1581 u16 pmcsr;
1582
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00001583 /* If there is no power capability, silently succeed */
1584 if (!bp->pm_cap) {
1585 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1586 return 0;
1587 }
1588
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001589 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1590
1591 switch (state) {
1592 case PCI_D0:
1593 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1594 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1595 PCI_PM_CTRL_PME_STATUS));
1596
1597 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1598 /* delay required during transition out of D3hot */
1599 msleep(20);
1600 break;
1601
1602 case PCI_D3hot:
1603 /* If there are other clients above, don't
1604 shut down the power */
1605 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1606 return 0;
1607 /* Don't shut down the power for emulation and FPGA */
1608 if (CHIP_REV_IS_SLOW(bp))
1609 return 0;
1610
1611 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1612 pmcsr |= 3;
1613
1614 if (bp->wol)
1615 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1616
1617 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1618 pmcsr);
1619
1620 /* No more memory access after this point until
1621 * device is brought back to D0.
1622 */
1623 break;
1624
1625 default:
1626 return -EINVAL;
1627 }
1628 return 0;
1629}
1630
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001631/*
1632 * net_device service functions
1633 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001634int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001635{
1636 int work_done = 0;
1637 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1638 napi);
1639 struct bnx2x *bp = fp->bp;
1640
1641 while (1) {
1642#ifdef BNX2X_STOP_ON_ERROR
1643 if (unlikely(bp->panic)) {
1644 napi_complete(napi);
1645 return 0;
1646 }
1647#endif
1648
1649 if (bnx2x_has_tx_work(fp))
1650 bnx2x_tx_int(fp);
1651
1652 if (bnx2x_has_rx_work(fp)) {
1653 work_done += bnx2x_rx_int(fp, budget - work_done);
1654
1655 /* must not complete if we consumed full budget */
1656 if (work_done >= budget)
1657 break;
1658 }
1659
1660 /* Fall out from the NAPI loop if needed */
1661 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001662#ifdef BCM_CNIC
1663 /* No need to update SB for FCoE L2 ring as long as
1664 * it's connected to the default SB and the SB
1665 * has been updated when NAPI was scheduled.
1666 */
1667 if (IS_FCOE_FP(fp)) {
1668 napi_complete(napi);
1669 break;
1670 }
1671#endif
1672
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001673 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001674 /* bnx2x_has_rx_work() reads the status block,
1675 * thus we need to ensure that status block indices
1676 * have been actually read (bnx2x_update_fpsb_idx)
1677 * prior to this check (bnx2x_has_rx_work) so that
1678 * we won't write the "newer" value of the status block
1679 * to IGU (if there was a DMA right after
1680 * bnx2x_has_rx_work and if there is no rmb, the memory
1681 * reading (bnx2x_update_fpsb_idx) may be postponed
1682 * to right before bnx2x_ack_sb). In this case there
1683 * will never be another interrupt until there is
1684 * another update of the status block, while there
1685 * is still unhandled work.
1686 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001687 rmb();
1688
1689 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1690 napi_complete(napi);
1691 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001692 DP(NETIF_MSG_HW,
1693 "Update index to %d\n", fp->fp_hc_idx);
1694 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1695 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001696 IGU_INT_ENABLE, 1);
1697 break;
1698 }
1699 }
1700 }
1701
1702 return work_done;
1703}
1704
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001705/* we split the first BD into a headers BD and a data BD
 1706 * to ease the pain of our fellow microcode engineers;
 1707 * we use one mapping for both BDs.
 1708 * So far this has only been observed to happen
 1709 * in Other Operating Systems(TM).
 1710 */
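/* Illustration (assuming a TSO skb whose linear part is longer than the
 * header length hlen): after the split, the start BD describes bytes
 * [0, hlen) of the original DMA mapping and the new data BD describes
 * [hlen, old_len), both pointing into the same mapping. */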
1711static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1712 struct bnx2x_fastpath *fp,
1713 struct sw_tx_bd *tx_buf,
1714 struct eth_tx_start_bd **tx_bd, u16 hlen,
1715 u16 bd_prod, int nbd)
1716{
1717 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1718 struct eth_tx_bd *d_tx_bd;
1719 dma_addr_t mapping;
1720 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1721
1722 /* first fix first BD */
1723 h_tx_bd->nbd = cpu_to_le16(nbd);
1724 h_tx_bd->nbytes = cpu_to_le16(hlen);
1725
1726 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1727 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1728 h_tx_bd->addr_lo, h_tx_bd->nbd);
1729
1730 /* now get a new data BD
1731 * (after the pbd) and fill it */
1732 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1733 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1734
1735 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1736 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1737
1738 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1739 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1740 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1741
1742 /* this marks the BD as one that has no individual mapping */
1743 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1744
1745 DP(NETIF_MSG_TX_QUEUED,
1746 "TSO split data size is %d (%x:%x)\n",
1747 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1748
1749 /* update tx_bd */
1750 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1751
1752 return bd_prod;
1753}
1754
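/* Adjust a partial checksum when the checksummed region does not start
 * exactly at the transport header: fold out (fix > 0) or fold in (fix < 0)
 * the |fix| bytes adjacent to the header, then byte-swap the result for
 * the parsing BD. */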
1755static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1756{
1757 if (fix > 0)
1758 csum = (u16) ~csum_fold(csum_sub(csum,
1759 csum_partial(t_header - fix, fix, 0)));
1760
1761 else if (fix < 0)
1762 csum = (u16) ~csum_fold(csum_add(csum,
1763 csum_partial(t_header, -fix, 0)));
1764
1765 return swab16(csum);
1766}
1767
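/* Translate the skb's checksum and GSO state into the driver's XMIT_*
 * flag mask used below to program the transmit BDs. */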
1768static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1769{
1770 u32 rc;
1771
1772 if (skb->ip_summed != CHECKSUM_PARTIAL)
1773 rc = XMIT_PLAIN;
1774
1775 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00001776 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001777 rc = XMIT_CSUM_V6;
1778 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1779 rc |= XMIT_CSUM_TCP;
1780
1781 } else {
1782 rc = XMIT_CSUM_V4;
1783 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1784 rc |= XMIT_CSUM_TCP;
1785 }
1786 }
1787
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00001788 if (skb_is_gso_v6(skb))
1789 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1790 else if (skb_is_gso(skb))
1791 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001792
1793 return rc;
1794}
1795
1796#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 1797/* Check if the packet requires linearization (i.e. it is too fragmented).
 1798   There is no need to check fragmentation if the page size > 8K (there
 1799   will be no violation of FW restrictions). */
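/* Sketch of the check below: with wnd_size = MAX_FETCH_BD - 3 descriptors
 * per window, a sliding window is run over the linear part plus the frag
 * list, and the packet is flagged for linearization whenever any wnd_size
 * consecutive BDs would carry less than gso_size bytes in total. */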
1800static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1801 u32 xmit_type)
1802{
1803 int to_copy = 0;
1804 int hlen = 0;
1805 int first_bd_sz = 0;
1806
1807 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1808 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1809
1810 if (xmit_type & XMIT_GSO) {
1811 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1812 /* Check if LSO packet needs to be copied:
1813 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1814 int wnd_size = MAX_FETCH_BD - 3;
1815 /* Number of windows to check */
1816 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1817 int wnd_idx = 0;
1818 int frag_idx = 0;
1819 u32 wnd_sum = 0;
1820
1821 /* Headers length */
1822 hlen = (int)(skb_transport_header(skb) - skb->data) +
1823 tcp_hdrlen(skb);
1824
 1825			/* Amount of data (w/o headers) in the linear part of the SKB */
1826 first_bd_sz = skb_headlen(skb) - hlen;
1827
1828 wnd_sum = first_bd_sz;
1829
1830 /* Calculate the first sum - it's special */
1831 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1832 wnd_sum +=
1833 skb_shinfo(skb)->frags[frag_idx].size;
1834
 1835			/* If there was data in the linear part of the skb - check it */
1836 if (first_bd_sz > 0) {
1837 if (unlikely(wnd_sum < lso_mss)) {
1838 to_copy = 1;
1839 goto exit_lbl;
1840 }
1841
1842 wnd_sum -= first_bd_sz;
1843 }
1844
1845 /* Others are easier: run through the frag list and
1846 check all windows */
1847 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1848 wnd_sum +=
1849 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1850
1851 if (unlikely(wnd_sum < lso_mss)) {
1852 to_copy = 1;
1853 break;
1854 }
1855 wnd_sum -=
1856 skb_shinfo(skb)->frags[wnd_idx].size;
1857 }
1858 } else {
 1859			/* in the non-LSO case a too-fragmented packet should
 1860			   always be linearized */
1861 to_copy = 1;
1862 }
1863 }
1864
1865exit_lbl:
1866 if (unlikely(to_copy))
1867 DP(NETIF_MSG_TX_QUEUED,
1868 "Linearization IS REQUIRED for %s packet. "
1869 "num_frags %d hlen %d first_bd_sz %d\n",
1870 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1871 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1872
1873 return to_copy;
1874}
1875#endif
1876
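/* Fill the E2 parsing data word with the LSO MSS and, for IPv6 packets
 * carrying extension headers, the corresponding flag. */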
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001877static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1878 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001879{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001880 *parsing_data |= (skb_shinfo(skb)->gso_size <<
1881 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1882 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001883 if ((xmit_type & XMIT_GSO_V6) &&
1884 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001885 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001886}
1887
 1888/**
 1889 * Update the parsing BD (PBD) in the GSO case: fill in the LSO MSS,
 1890 * the TCP sequence/flags and the TCP pseudo checksum.
 1891 *
 1892 * @param skb
 1893 * @param pbd
 1894 * @param xmit_type
 1895 */
1896static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1897 struct eth_tx_parse_bd_e1x *pbd,
1898 u32 xmit_type)
1899{
1900 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1901 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1902 pbd->tcp_flags = pbd_tcp_flags(skb);
1903
1904 if (xmit_type & XMIT_GSO_V4) {
1905 pbd->ip_id = swab16(ip_hdr(skb)->id);
1906 pbd->tcp_pseudo_csum =
1907 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1908 ip_hdr(skb)->daddr,
1909 0, IPPROTO_TCP, 0));
1910
1911 } else
1912 pbd->tcp_pseudo_csum =
1913 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1914 &ipv6_hdr(skb)->daddr,
1915 0, IPPROTO_TCP, 0));
1916
1917 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1918}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001919
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001920/**
 1921 * Set the E2 parsing data in the checksum offload case.
 1922 * @param bp
 1923 * @param skb
 1924 * @param parsing_data
 1925 * @param xmit_type
 1926 *
 1927 * @return header length in bytes
 1928 */
1929static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001930 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001931{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001932 *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1933 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1934 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001935
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001936 *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1937 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1938 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001939
1940 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1941}
1942
 1943/**
 1944 * Set the E1x parsing BD in the checksum offload case.
 1945 * @param bp
 1946 * @param skb
 1947 * @param pbd
 1948 * @param xmit_type
 1949 *
 1950 * @return Header length in bytes
 1951 */
1952static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1953 struct eth_tx_parse_bd_e1x *pbd,
1954 u32 xmit_type)
1955{
1956 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1957
1958 /* for now NS flag is not used in Linux */
1959 pbd->global_data =
1960 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1961 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1962
1963 pbd->ip_hlen_w = (skb_transport_header(skb) -
1964 skb_network_header(skb)) / 2;
1965
1966 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1967
1968 pbd->total_hlen_w = cpu_to_le16(hlen);
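	/* total_hlen_w above is kept in 16-bit words; convert hlen back to
	 * bytes for the return value */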
1969 hlen = hlen*2;
1970
1971 if (xmit_type & XMIT_CSUM_TCP) {
1972 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1973
1974 } else {
1975 s8 fix = SKB_CS_OFF(skb); /* signed! */
1976
1977 DP(NETIF_MSG_TX_QUEUED,
1978 "hlen %d fix %d csum before fix %x\n",
1979 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1980
1981 /* HW bug: fixup the CSUM */
1982 pbd->tcp_pseudo_csum =
1983 bnx2x_csum_fix(skb_transport_header(skb),
1984 SKB_CS(skb), fix);
1985
1986 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1987 pbd->tcp_pseudo_csum);
1988 }
1989
1990 return hlen;
1991}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001992
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001993/* called with netif_tx_lock
1994 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1995 * netif_wake_queue()
1996 */
1997netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1998{
1999 struct bnx2x *bp = netdev_priv(dev);
2000 struct bnx2x_fastpath *fp;
2001 struct netdev_queue *txq;
2002 struct sw_tx_bd *tx_buf;
2003 struct eth_tx_start_bd *tx_start_bd;
2004 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002005 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002006 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002007 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002008 u16 pkt_prod, bd_prod;
2009 int nbd, fp_index;
2010 dma_addr_t mapping;
2011 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2012 int i;
2013 u8 hlen = 0;
2014 __le16 pkt_size = 0;
2015 struct ethhdr *eth;
2016 u8 mac_type = UNICAST_ADDRESS;
2017
2018#ifdef BNX2X_STOP_ON_ERROR
2019 if (unlikely(bp->panic))
2020 return NETDEV_TX_BUSY;
2021#endif
2022
2023 fp_index = skb_get_queue_mapping(skb);
2024 txq = netdev_get_tx_queue(dev, fp_index);
2025
2026 fp = &bp->fp[fp_index];
2027
2028 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2029 fp->eth_q_stats.driver_xoff++;
2030 netif_tx_stop_queue(txq);
2031 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2032 return NETDEV_TX_BUSY;
2033 }
2034
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002035 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2036 "protocol(%x,%x) gso type %x xmit_type %x\n",
2037 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002038 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2039
2040 eth = (struct ethhdr *)skb->data;
2041
2042 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2043 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2044 if (is_broadcast_ether_addr(eth->h_dest))
2045 mac_type = BROADCAST_ADDRESS;
2046 else
2047 mac_type = MULTICAST_ADDRESS;
2048 }
2049
2050#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2051 /* First, check if we need to linearize the skb (due to FW
2052 restrictions). No need to check fragmentation if page size > 8K
 2053	   (there will be no violation of FW restrictions) */
2054 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2055 /* Statistics of linearization */
2056 bp->lin_cnt++;
2057 if (skb_linearize(skb) != 0) {
2058 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2059 "silently dropping this SKB\n");
2060 dev_kfree_skb_any(skb);
2061 return NETDEV_TX_OK;
2062 }
2063 }
2064#endif
2065
2066 /*
2067 Please read carefully. First we use one BD which we mark as start,
2068 then we have a parsing info BD (used for TSO or xsum),
2069 and only then we have the rest of the TSO BDs.
2070 (don't forget to mark the last one as last,
2071 and to unmap only AFTER you write to the BD ...)
 2072	And above all, all PBD sizes are in words - NOT DWORDS!
2073 */
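	/*
	 * Illustrative chain for a typical packet: start BD -> parsing BD
	 * (E1x or E2) -> one data BD per fragment (plus an extra data BD if
	 * the TSO header is split further down); nbd counts the start BD,
	 * the parsing BD and the data BDs.
	 */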
2074
2075 pkt_prod = fp->tx_pkt_prod++;
2076 bd_prod = TX_BD(fp->tx_bd_prod);
2077
2078 /* get a tx_buf and first BD */
2079 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2080 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2081
2082 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002083 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2084 mac_type);
2085
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002086 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002087 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002088
2089 /* remember the first BD of the packet */
2090 tx_buf->first_bd = fp->tx_bd_prod;
2091 tx_buf->skb = skb;
2092 tx_buf->flags = 0;
2093
2094 DP(NETIF_MSG_TX_QUEUED,
2095 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2096 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2097
Jesse Grosseab6d182010-10-20 13:56:03 +00002098 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002099 tx_start_bd->vlan_or_ethertype =
2100 cpu_to_le16(vlan_tx_tag_get(skb));
2101 tx_start_bd->bd_flags.as_bitfield |=
2102 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002103 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002104 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002105
2106 /* turn on parsing and get a BD */
2107 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002108
2109 if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002110 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2111
2112 if (xmit_type & XMIT_CSUM_V4)
2113 tx_start_bd->bd_flags.as_bitfield |=
2114 ETH_TX_BD_FLAGS_IP_CSUM;
2115 else
2116 tx_start_bd->bd_flags.as_bitfield |=
2117 ETH_TX_BD_FLAGS_IPV6;
2118
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002119 if (!(xmit_type & XMIT_CSUM_TCP))
2120 tx_start_bd->bd_flags.as_bitfield |=
2121 ETH_TX_BD_FLAGS_IS_UDP;
2122 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002123
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002124 if (CHIP_IS_E2(bp)) {
2125 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2126 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2127 /* Set PBD in checksum offload case */
2128 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002129 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2130 &pbd_e2_parsing_data,
2131 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002132 } else {
2133 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2134 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2135 /* Set PBD in checksum offload case */
2136 if (xmit_type & XMIT_CSUM)
2137 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002138
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002139 }
2140
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002141 /* Map skb linear data for DMA */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002142 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2143 skb_headlen(skb), DMA_TO_DEVICE);
2144
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002145	/* Set up the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002146 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2147 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2148 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2149 tx_start_bd->nbd = cpu_to_le16(nbd);
2150 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2151 pkt_size = tx_start_bd->nbytes;
2152
2153 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2154 " nbytes %d flags %x vlan %x\n",
2155 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2156 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002157 tx_start_bd->bd_flags.as_bitfield,
2158 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002159
2160 if (xmit_type & XMIT_GSO) {
2161
2162 DP(NETIF_MSG_TX_QUEUED,
2163 "TSO packet len %d hlen %d total len %d tso size %d\n",
2164 skb->len, hlen, skb_headlen(skb),
2165 skb_shinfo(skb)->gso_size);
2166
2167 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2168
2169 if (unlikely(skb_headlen(skb) > hlen))
2170 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2171 hlen, bd_prod, ++nbd);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002172 if (CHIP_IS_E2(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002173 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2174 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002175 else
2176 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002177 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002178
2179 /* Set the PBD's parsing_data field if not zero
2180 * (for the chips newer than 57711).
2181 */
2182 if (pbd_e2_parsing_data)
2183 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2184
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002185 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2186
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002187 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002188 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2189 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2190
2191 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2192 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2193 if (total_pkt_bd == NULL)
2194 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2195
2196 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2197 frag->page_offset,
2198 frag->size, DMA_TO_DEVICE);
2199
2200 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2201 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2202 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2203 le16_add_cpu(&pkt_size, frag->size);
2204
2205 DP(NETIF_MSG_TX_QUEUED,
2206 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2207 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2208 le16_to_cpu(tx_data_bd->nbytes));
2209 }
2210
2211 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2212
2213 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2214
2215 /* now send a tx doorbell, counting the next BD
2216 * if the packet contains or ends with it
2217 */
2218 if (TX_BD_POFF(bd_prod) < nbd)
2219 nbd++;
2220
2221 if (total_pkt_bd != NULL)
2222 total_pkt_bd->total_pkt_bytes = pkt_size;
2223
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002224 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002225 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002226 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002227 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002228 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2229 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2230 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2231 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002232 if (pbd_e2)
2233 DP(NETIF_MSG_TX_QUEUED,
2234 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2235 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2236 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2237 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2238 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002239 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2240
2241 /*
2242 * Make sure that the BD data is updated before updating the producer
2243 * since FW might read the BD right after the producer is updated.
2244 * This is only applicable for weak-ordered memory model archs such
2245 * as IA-64. The following barrier is also mandatory since FW will
2246 * assumes packets must have BDs.
2247 */
2248 wmb();
2249
2250 fp->tx_db.data.prod += nbd;
2251 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002252
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002253 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002254
2255 mmiowb();
2256
2257 fp->tx_bd_prod += nbd;
2258
2259 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2260 netif_tx_stop_queue(txq);
2261
2262 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2263 * ordering of set_bit() in netif_tx_stop_queue() and read of
 2264		 * fp->tx_bd_cons */
2265 smp_mb();
2266
2267 fp->eth_q_stats.driver_xoff++;
2268 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2269 netif_tx_wake_queue(txq);
2270 }
2271 fp->tx_pkt++;
2272
2273 return NETDEV_TX_OK;
2274}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002275
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002276/* called with rtnl_lock */
2277int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2278{
2279 struct sockaddr *addr = p;
2280 struct bnx2x *bp = netdev_priv(dev);
2281
2282 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2283 return -EINVAL;
2284
2285 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002286 if (netif_running(dev))
2287 bnx2x_set_eth_mac(bp, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002288
2289 return 0;
2290}
2291
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002292
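/* Request the device's interrupts: the MSI-X vectors when MSI-X is enabled,
 * otherwise a single MSI or legacy INTx line. */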
stephen hemminger8d962862010-10-21 07:50:56 +00002293static int bnx2x_setup_irqs(struct bnx2x *bp)
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002294{
2295 int rc = 0;
2296 if (bp->flags & USING_MSIX_FLAG) {
2297 rc = bnx2x_req_msix_irqs(bp);
2298 if (rc)
2299 return rc;
2300 } else {
2301 bnx2x_ack_int(bp);
2302 rc = bnx2x_req_irq(bp);
2303 if (rc) {
2304 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2305 return rc;
2306 }
2307 if (bp->flags & USING_MSI_FLAG) {
2308 bp->dev->irq = bp->pdev->irq;
2309 netdev_info(bp->dev, "using MSI IRQ %d\n",
2310 bp->pdev->irq);
2311 }
2312 }
2313
2314 return 0;
2315}
2316
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002317void bnx2x_free_mem_bp(struct bnx2x *bp)
2318{
2319 kfree(bp->fp);
2320 kfree(bp->msix_table);
2321 kfree(bp->ilt);
2322}
2323
2324int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2325{
2326 struct bnx2x_fastpath *fp;
2327 struct msix_entry *tbl;
2328 struct bnx2x_ilt *ilt;
2329
2330 /* fp array */
2331 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2332 if (!fp)
2333 goto alloc_err;
2334 bp->fp = fp;
2335
2336 /* msix table */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002337 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002338 GFP_KERNEL);
2339 if (!tbl)
2340 goto alloc_err;
2341 bp->msix_table = tbl;
2342
2343 /* ilt */
2344 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2345 if (!ilt)
2346 goto alloc_err;
2347 bp->ilt = ilt;
2348
2349 return 0;
2350alloc_err:
2351 bnx2x_free_mem_bp(bp);
2352 return -ENOMEM;
2353
2354}
2355
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002356/* called with rtnl_lock */
2357int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2358{
2359 struct bnx2x *bp = netdev_priv(dev);
2360 int rc = 0;
2361
2362 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2363 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2364 return -EAGAIN;
2365 }
2366
2367 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2368 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2369 return -EINVAL;
2370
2371 /* This does not race with packet allocation
2372 * because the actual alloc size is
2373 * only updated as part of load
2374 */
2375 dev->mtu = new_mtu;
2376
2377 if (netif_running(dev)) {
2378 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2379 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2380 }
2381
2382 return rc;
2383}
2384
2385void bnx2x_tx_timeout(struct net_device *dev)
2386{
2387 struct bnx2x *bp = netdev_priv(dev);
2388
2389#ifdef BNX2X_STOP_ON_ERROR
2390 if (!bp->panic)
2391 bnx2x_panic();
2392#endif
 2393	/* This allows the netif to be shut down gracefully before resetting */
2394 schedule_delayed_work(&bp->reset_task, 0);
2395}
2396
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002397int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2398{
2399 struct net_device *dev = pci_get_drvdata(pdev);
2400 struct bnx2x *bp;
2401
2402 if (!dev) {
2403 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2404 return -ENODEV;
2405 }
2406 bp = netdev_priv(dev);
2407
2408 rtnl_lock();
2409
2410 pci_save_state(pdev);
2411
2412 if (!netif_running(dev)) {
2413 rtnl_unlock();
2414 return 0;
2415 }
2416
2417 netif_device_detach(dev);
2418
2419 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2420
2421 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2422
2423 rtnl_unlock();
2424
2425 return 0;
2426}
2427
2428int bnx2x_resume(struct pci_dev *pdev)
2429{
2430 struct net_device *dev = pci_get_drvdata(pdev);
2431 struct bnx2x *bp;
2432 int rc;
2433
2434 if (!dev) {
2435 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2436 return -ENODEV;
2437 }
2438 bp = netdev_priv(dev);
2439
2440 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2441 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2442 return -EAGAIN;
2443 }
2444
2445 rtnl_lock();
2446
2447 pci_restore_state(pdev);
2448
2449 if (!netif_running(dev)) {
2450 rtnl_unlock();
2451 return 0;
2452 }
2453
2454 bnx2x_set_power_state(bp, PCI_D0);
2455 netif_device_attach(dev);
2456
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002457 /* Since the chip was reset, clear the FW sequence number */
2458 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002459 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2460
2461 rtnl_unlock();
2462
2463 return rc;
2464}