1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/etherdevice.h>
 19#include <linux/ip.h>
20#include <net/ipv6.h>
21#include <net/ip6_checksum.h>
22#include <linux/firmware.h>
23#include "bnx2x_cmn.h"
24
25#ifdef BCM_VLAN
26#include <linux/if_vlan.h>
27#endif
28
29#include "bnx2x_init.h"
30
31
32/* free skb in the packet ring at pos idx
33 * return idx of last bd freed
34 */
35static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
36 u16 idx)
37{
38 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39 struct eth_tx_start_bd *tx_start_bd;
40 struct eth_tx_bd *tx_data_bd;
41 struct sk_buff *skb = tx_buf->skb;
42 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
43 int nbd;
44
 45 /* prefetch skb end pointer to speed up dev_kfree_skb() */
46 prefetch(&skb->end);
47
48 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
49 idx, tx_buf, skb);
50
51 /* unmap first bd */
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
56
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58#ifdef BNX2X_STOP_ON_ERROR
59 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60 BNX2X_ERR("BAD nbd!\n");
61 bnx2x_panic();
62 }
63#endif
64 new_cons = nbd + tx_buf->first_bd;
65
66 /* Get the next bd */
67 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
68
69 /* Skip a parse bd... */
70 --nbd;
71 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
72
73 /* ...and the TSO split header bd since they have no mapping */
74 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
75 --nbd;
76 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
77 }
78
79 /* now free frags */
80 while (nbd > 0) {
81
82 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
86 if (--nbd)
87 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
88 }
89
90 /* release skb */
91 WARN_ON(!skb);
92 dev_kfree_skb(skb);
93 tx_buf->first_bd = 0;
94 tx_buf->skb = NULL;
95
96 return new_cons;
97}
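/* Sketch of the BD chain freed above, derived from this function rather
 * than separate documentation: a start BD, one parse BD and an optional
 * TSO split-header BD (neither of which carries its own DMA mapping),
 * then one data BD per fragment; the nbd field of the start BD is
 * assumed to count all of them.
 */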
98
99int bnx2x_tx_int(struct bnx2x_fastpath *fp)
100{
101 struct bnx2x *bp = fp->bp;
102 struct netdev_queue *txq;
103 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
104
105#ifdef BNX2X_STOP_ON_ERROR
106 if (unlikely(bp->panic))
107 return -1;
108#endif
109
110 txq = netdev_get_tx_queue(bp->dev, fp->index);
111 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112 sw_cons = fp->tx_pkt_cons;
113
114 while (sw_cons != hw_cons) {
115 u16 pkt_cons;
116
117 pkt_cons = TX_BD(sw_cons);
118
119 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
 120 " pkt_cons %u\n",
 121 fp->index, hw_cons, sw_cons, pkt_cons);
122
123 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
124 sw_cons++;
125 }
126
127 fp->tx_pkt_cons = sw_cons;
128 fp->tx_bd_cons = bd_cons;
129
130 /* Need to make the tx_bd_cons update visible to start_xmit()
131 * before checking for netif_tx_queue_stopped(). Without the
132 * memory barrier, there is a small possibility that
133 * start_xmit() will miss it and cause the queue to be stopped
134 * forever.
135 */
136 smp_mb();
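/* This barrier is assumed to pair with the smp_mb() that
 * bnx2x_start_xmit() issues after stopping the queue: either the xmit
 * path observes the new tx_bd_cons, or this path observes the stopped
 * queue and re-checks the ring occupancy under the tx lock below.
 */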
137
138 if (unlikely(netif_tx_queue_stopped(txq))) {
139 /* Taking tx_lock() is needed to prevent reenabling the queue
 140 * while it's empty. This could have happened if rx_action() gets
141 * suspended in bnx2x_tx_int() after the condition before
142 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
143 *
144 * stops the queue->sees fresh tx_bd_cons->releases the queue->
145 * sends some packets consuming the whole queue again->
146 * stops the queue
147 */
148
149 __netif_tx_lock(txq, smp_processor_id());
150
151 if ((netif_tx_queue_stopped(txq)) &&
152 (bp->state == BNX2X_STATE_OPEN) &&
153 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
154 netif_tx_wake_queue(txq);
155
156 __netif_tx_unlock(txq);
157 }
158 return 0;
159}
160
161static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
162 u16 idx)
163{
164 u16 last_max = fp->last_max_sge;
165
166 if (SUB_S16(idx, last_max) > 0)
167 fp->last_max_sge = idx;
168}
169
170static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
171 struct eth_fast_path_rx_cqe *fp_cqe)
172{
173 struct bnx2x *bp = fp->bp;
174 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
175 le16_to_cpu(fp_cqe->len_on_bd)) >>
176 SGE_PAGE_SHIFT;
177 u16 last_max, last_elem, first_elem;
178 u16 delta = 0;
179 u16 i;
180
181 if (!sge_len)
182 return;
183
184 /* First mark all used pages */
185 for (i = 0; i < sge_len; i++)
186 SGE_MASK_CLEAR_BIT(fp,
 187 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
188
 189 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
190 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
191
 192 /* Here we assume that the last SGE index is the biggest */
 193 prefetch((void *)(fp->sge_mask));
194 bnx2x_update_last_max_sge(fp,
 195 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
196
197 last_max = RX_SGE(fp->last_max_sge);
198 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
199 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
200
201 /* If ring is not full */
202 if (last_elem + 1 != first_elem)
203 last_elem++;
204
205 /* Now update the prod */
206 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
207 if (likely(fp->sge_mask[i]))
208 break;
209
210 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
211 delta += RX_SGE_MASK_ELEM_SZ;
212 }
213
214 if (delta > 0) {
215 fp->rx_sge_prod += delta;
216 /* clear page-end entries */
217 bnx2x_clear_sge_mask_next_elems(fp);
218 }
219
220 DP(NETIF_MSG_RX_STATUS,
221 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
222 fp->last_max_sge, fp->rx_sge_prod);
223}
224
225static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
226 struct sk_buff *skb, u16 cons, u16 prod)
227{
228 struct bnx2x *bp = fp->bp;
229 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
230 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
231 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
232 dma_addr_t mapping;
233
234 /* move empty skb from pool to prod and map it */
235 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
236 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
237 bp->rx_buf_size, DMA_FROM_DEVICE);
238 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
239
240 /* move partial skb from cons to pool (don't unmap yet) */
241 fp->tpa_pool[queue] = *cons_rx_buf;
242
243 /* mark bin state as start - print error if current state != stop */
244 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
245 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
246
247 fp->tpa_state[queue] = BNX2X_TPA_START;
248
249 /* point prod_bd to new skb */
250 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
251 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
252
253#ifdef BNX2X_STOP_ON_ERROR
254 fp->tpa_queue_used |= (1 << queue);
255#ifdef _ASM_GENERIC_INT_L64_H
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
257#else
258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
259#endif
260 fp->tpa_queue_used);
261#endif
262}
263
264static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
265 struct sk_buff *skb,
266 struct eth_fast_path_rx_cqe *fp_cqe,
267 u16 cqe_idx)
268{
269 struct sw_rx_page *rx_pg, old_rx_pg;
270 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
271 u32 i, frag_len, frag_size, pages;
272 int err;
273 int j;
274
275 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
276 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
277
278 /* This is needed in order to enable forwarding support */
279 if (frag_size)
280 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
281 max(frag_size, (u32)len_on_bd));
282
283#ifdef BNX2X_STOP_ON_ERROR
284 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
285 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
286 pages, cqe_idx);
287 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
288 fp_cqe->pkt_len, len_on_bd);
289 bnx2x_panic();
290 return -EINVAL;
291 }
292#endif
293
294 /* Run through the SGL and compose the fragmented skb */
295 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
296 u16 sge_idx =
 297 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
298
299 /* FW gives the indices of the SGE as if the ring is an array
300 (meaning that "next" element will consume 2 indices) */
301 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
302 rx_pg = &fp->rx_page_ring[sge_idx];
303 old_rx_pg = *rx_pg;
304
305 /* If we fail to allocate a substitute page, we simply stop
306 where we are and drop the whole packet */
307 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
308 if (unlikely(err)) {
309 fp->eth_q_stats.rx_skb_alloc_failed++;
310 return err;
311 }
312
 313 /* Unmap the page as we are going to pass it to the stack */
314 dma_unmap_page(&bp->pdev->dev,
315 dma_unmap_addr(&old_rx_pg, mapping),
316 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
317
318 /* Add one frag and update the appropriate fields in the skb */
319 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
320
321 skb->data_len += frag_len;
322 skb->truesize += frag_len;
323 skb->len += frag_len;
324
325 frag_size -= frag_len;
326 }
327
328 return 0;
329}
330
331static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
332 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
333 u16 cqe_idx)
334{
335 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
336 struct sk_buff *skb = rx_buf->skb;
337 /* alloc new skb */
338 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
339
340 /* Unmap skb in the pool anyway, as we are going to change
341 pool entry status to BNX2X_TPA_STOP even if new skb allocation
342 fails. */
343 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
344 bp->rx_buf_size, DMA_FROM_DEVICE);
345
346 if (likely(new_skb)) {
347 /* fix ip xsum and give it to the stack */
348 /* (no need to map the new skb) */
349#ifdef BCM_VLAN
350 int is_vlan_cqe =
351 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
352 PARSING_FLAGS_VLAN);
353 int is_not_hwaccel_vlan_cqe =
354 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
355#endif
356
357 prefetch(skb);
358 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
359
360#ifdef BNX2X_STOP_ON_ERROR
361 if (pad + len > bp->rx_buf_size) {
362 BNX2X_ERR("skb_put is about to fail... "
363 "pad %d len %d rx_buf_size %d\n",
364 pad, len, bp->rx_buf_size);
365 bnx2x_panic();
366 return;
367 }
368#endif
369
370 skb_reserve(skb, pad);
371 skb_put(skb, len);
372
373 skb->protocol = eth_type_trans(skb, bp->dev);
374 skb->ip_summed = CHECKSUM_UNNECESSARY;
375
376 {
377 struct iphdr *iph;
378
379 iph = (struct iphdr *)skb->data;
380#ifdef BCM_VLAN
381 /* If there is no Rx VLAN offloading -
382 take VLAN tag into an account */
383 if (unlikely(is_not_hwaccel_vlan_cqe))
384 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
385#endif
386 iph->check = 0;
387 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
388 }
389
390 if (!bnx2x_fill_frag_skb(bp, fp, skb,
391 &cqe->fast_path_cqe, cqe_idx)) {
392#ifdef BCM_VLAN
393 if ((bp->vlgrp != NULL) &&
 394 (le16_to_cpu(cqe->fast_path_cqe.
 395 pars_flags.flags) & PARSING_FLAGS_VLAN))
396 vlan_gro_receive(&fp->napi, bp->vlgrp,
397 le16_to_cpu(cqe->fast_path_cqe.
398 vlan_tag), skb);
399 else
400#endif
401 napi_gro_receive(&fp->napi, skb);
402 } else {
403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
404 " - dropping packet!\n");
405 dev_kfree_skb(skb);
406 }
407
408
409 /* put new skb in bin */
410 fp->tpa_pool[queue].skb = new_skb;
411
412 } else {
413 /* else drop the packet and keep the buffer in the bin */
414 DP(NETIF_MSG_RX_STATUS,
415 "Failed to allocate new skb - dropping packet!\n");
416 fp->eth_q_stats.rx_skb_alloc_failed++;
417 }
418
419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
420}
421
422/* Set Toeplitz hash value in the skb using the value from the
423 * CQE (calculated by HW).
424 */
425static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
426 struct sk_buff *skb)
427{
428 /* Set Toeplitz hash from CQE */
429 if ((bp->dev->features & NETIF_F_RXHASH) &&
430 (cqe->fast_path_cqe.status_flags &
431 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
432 skb->rxhash =
433 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
434}
435
436int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
437{
438 struct bnx2x *bp = fp->bp;
439 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
440 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
441 int rx_pkt = 0;
442
443#ifdef BNX2X_STOP_ON_ERROR
444 if (unlikely(bp->panic))
445 return 0;
446#endif
447
448 /* CQ "next element" is of the size of the regular element,
449 that's why it's ok here */
450 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
451 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
452 hw_comp_cons++;
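/* The conditional increment above is assumed to step over the "next
 * page" element that occupies the last slot of each RCQ page, so the
 * software consumer never compares against a link-element index.
 */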
453
454 bd_cons = fp->rx_bd_cons;
455 bd_prod = fp->rx_bd_prod;
456 bd_prod_fw = bd_prod;
457 sw_comp_cons = fp->rx_comp_cons;
458 sw_comp_prod = fp->rx_comp_prod;
459
460 /* Memory barrier necessary as speculative reads of the rx
461 * buffer can be ahead of the index in the status block
462 */
463 rmb();
464
465 DP(NETIF_MSG_RX_STATUS,
466 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
467 fp->index, hw_comp_cons, sw_comp_cons);
468
469 while (sw_comp_cons != hw_comp_cons) {
470 struct sw_rx_bd *rx_buf = NULL;
471 struct sk_buff *skb;
472 union eth_rx_cqe *cqe;
473 u8 cqe_fp_flags;
474 u16 len, pad;
475
476 comp_ring_cons = RCQ_BD(sw_comp_cons);
477 bd_prod = RX_BD(bd_prod);
478 bd_cons = RX_BD(bd_cons);
479
480 /* Prefetch the page containing the BD descriptor
481 at producer's index. It will be needed when new skb is
482 allocated */
483 prefetch((void *)(PAGE_ALIGN((unsigned long)
484 (&fp->rx_desc_ring[bd_prod])) -
485 PAGE_SIZE + 1));
486
487 cqe = &fp->rx_comp_ring[comp_ring_cons];
488 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
489
490 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
491 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
492 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
493 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
494 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
495 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
496
497 /* is this a slowpath msg? */
498 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
499 bnx2x_sp_event(fp, cqe);
500 goto next_cqe;
501
502 /* this is an rx packet */
503 } else {
504 rx_buf = &fp->rx_buf_ring[bd_cons];
505 skb = rx_buf->skb;
506 prefetch(skb);
507 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
508 pad = cqe->fast_path_cqe.placement_offset;
509
510 /* - If CQE is marked both TPA_START and TPA_END it is
511 * a non-TPA CQE.
512 * - FP CQE will always have either TPA_START or/and
513 * TPA_STOP flags set.
514 */
515 if ((!fp->disable_tpa) &&
516 (TPA_TYPE(cqe_fp_flags) !=
517 (TPA_TYPE_START | TPA_TYPE_END))) {
518 u16 queue = cqe->fast_path_cqe.queue_index;
519
520 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
521 DP(NETIF_MSG_RX_STATUS,
522 "calling tpa_start on queue %d\n",
523 queue);
524
525 bnx2x_tpa_start(fp, queue, skb,
526 bd_cons, bd_prod);
527
528 /* Set Toeplitz hash for an LRO skb */
529 bnx2x_set_skb_rxhash(bp, cqe, skb);
530
531 goto next_rx;
532 } else { /* TPA_STOP */
533 DP(NETIF_MSG_RX_STATUS,
534 "calling tpa_stop on queue %d\n",
535 queue);
536
537 if (!BNX2X_RX_SUM_FIX(cqe))
 538 BNX2X_ERR("STOP on non-TCP "
539 "data\n");
540
541 /* This is a size of the linear data
542 on this skb */
543 len = le16_to_cpu(cqe->fast_path_cqe.
544 len_on_bd);
545 bnx2x_tpa_stop(bp, fp, queue, pad,
546 len, cqe, comp_ring_cons);
547#ifdef BNX2X_STOP_ON_ERROR
548 if (bp->panic)
549 return 0;
550#endif
551
552 bnx2x_update_sge_prod(fp,
553 &cqe->fast_path_cqe);
554 goto next_cqe;
555 }
556 }
557
558 dma_sync_single_for_device(&bp->pdev->dev,
559 dma_unmap_addr(rx_buf, mapping),
560 pad + RX_COPY_THRESH,
561 DMA_FROM_DEVICE);
562 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
563
564 /* is this an error packet? */
565 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
566 DP(NETIF_MSG_RX_ERR,
567 "ERROR flags %x rx packet %u\n",
568 cqe_fp_flags, sw_comp_cons);
569 fp->eth_q_stats.rx_err_discard_pkt++;
570 goto reuse_rx;
571 }
572
573 /* Since we don't have a jumbo ring
574 * copy small packets if mtu > 1500
575 */
576 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
577 (len <= RX_COPY_THRESH)) {
578 struct sk_buff *new_skb;
579
580 new_skb = netdev_alloc_skb(bp->dev,
581 len + pad);
582 if (new_skb == NULL) {
583 DP(NETIF_MSG_RX_ERR,
584 "ERROR packet dropped "
585 "because of alloc failure\n");
586 fp->eth_q_stats.rx_skb_alloc_failed++;
587 goto reuse_rx;
588 }
589
590 /* aligned copy */
591 skb_copy_from_linear_data_offset(skb, pad,
592 new_skb->data + pad, len);
593 skb_reserve(new_skb, pad);
594 skb_put(new_skb, len);
595
596 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
597
598 skb = new_skb;
599
600 } else
601 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
602 dma_unmap_single(&bp->pdev->dev,
603 dma_unmap_addr(rx_buf, mapping),
604 bp->rx_buf_size,
605 DMA_FROM_DEVICE);
606 skb_reserve(skb, pad);
607 skb_put(skb, len);
608
609 } else {
610 DP(NETIF_MSG_RX_ERR,
611 "ERROR packet dropped because "
612 "of alloc failure\n");
613 fp->eth_q_stats.rx_skb_alloc_failed++;
614reuse_rx:
615 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
616 goto next_rx;
617 }
618
619 skb->protocol = eth_type_trans(skb, bp->dev);
620
 621 /* Set Toeplitz hash for a non-LRO skb */
622 bnx2x_set_skb_rxhash(bp, cqe, skb);
623
624 skb_checksum_none_assert(skb);
625
626 if (bp->rx_csum) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 else
630 fp->eth_q_stats.hw_csum_err++;
631 }
632 }
633
634 skb_record_rx_queue(skb, fp->index);
635
636#ifdef BCM_VLAN
637 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
638 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639 PARSING_FLAGS_VLAN))
640 vlan_gro_receive(&fp->napi, bp->vlgrp,
641 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
642 else
643#endif
644 napi_gro_receive(&fp->napi, skb);
645
646
647next_rx:
648 rx_buf->skb = NULL;
649
650 bd_cons = NEXT_RX_IDX(bd_cons);
651 bd_prod = NEXT_RX_IDX(bd_prod);
652 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
653 rx_pkt++;
654next_cqe:
655 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
656 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657
658 if (rx_pkt == budget)
659 break;
660 } /* while */
661
662 fp->rx_bd_cons = bd_cons;
663 fp->rx_bd_prod = bd_prod_fw;
664 fp->rx_comp_cons = sw_comp_cons;
665 fp->rx_comp_prod = sw_comp_prod;
666
667 /* Update producers */
668 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
669 fp->rx_sge_prod);
670
671 fp->rx_pkt += rx_pkt;
672 fp->rx_calls++;
673
674 return rx_pkt;
675}
676
677static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
678{
679 struct bnx2x_fastpath *fp = fp_cookie;
680 struct bnx2x *bp = fp->bp;
681
682 /* Return here if interrupt is disabled */
683 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
684 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
685 return IRQ_HANDLED;
686 }
687
688 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
 689 "[fp %d fw_sd %d igusb %d]\n",
 690 fp->index, fp->fw_sb_id, fp->igu_sb_id);
 691 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692
693#ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic))
695 return IRQ_HANDLED;
696#endif
697
698 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb);
701 prefetch(&fp->sb_running_index[SM_RX_ID]);
702 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
703
704 return IRQ_HANDLED;
705}
706
707/* HW Lock for shared dual port PHYs */
708void bnx2x_acquire_phy_lock(struct bnx2x *bp)
709{
710 mutex_lock(&bp->port.phy_mutex);
711
712 if (bp->port.need_hw_lock)
713 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
714}
715
716void bnx2x_release_phy_lock(struct bnx2x *bp)
717{
718 if (bp->port.need_hw_lock)
719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
720
721 mutex_unlock(&bp->port.phy_mutex);
722}
723
724void bnx2x_link_report(struct bnx2x *bp)
725{
726 if (bp->flags & MF_FUNC_DIS) {
727 netif_carrier_off(bp->dev);
728 netdev_err(bp->dev, "NIC Link is Down\n");
729 return;
730 }
731
732 if (bp->link_vars.link_up) {
733 u16 line_speed;
734
735 if (bp->state == BNX2X_STATE_OPEN)
736 netif_carrier_on(bp->dev);
737 netdev_info(bp->dev, "NIC Link is Up, ");
738
739 line_speed = bp->link_vars.line_speed;
740 if (IS_MF(bp)) {
741 u16 vn_max_rate;
 742
 743 vn_max_rate =
744 ((bp->mf_config[BP_VN(bp)] &
 745 FUNC_MF_CFG_MAX_BW_MASK) >>
 746 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
747 if (vn_max_rate < line_speed)
748 line_speed = vn_max_rate;
749 }
750 pr_cont("%d Mbps ", line_speed);
751
752 if (bp->link_vars.duplex == DUPLEX_FULL)
753 pr_cont("full duplex");
754 else
755 pr_cont("half duplex");
756
757 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
758 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
759 pr_cont(", receive ");
760 if (bp->link_vars.flow_ctrl &
761 BNX2X_FLOW_CTRL_TX)
762 pr_cont("& transmit ");
763 } else {
764 pr_cont(", transmit ");
765 }
766 pr_cont("flow control ON");
767 }
768 pr_cont("\n");
769
770 } else { /* link_down */
771 netif_carrier_off(bp->dev);
772 netdev_err(bp->dev, "NIC Link is Down\n");
773 }
774}
775
776/* Returns the number of actually allocated BDs */
777static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 778 int rx_ring_size)
779{
780 struct bnx2x *bp = fp->bp;
781 u16 ring_prod, cqe_ring_prod;
782 int i;
783
784 fp->rx_comp_cons = 0;
785 cqe_ring_prod = ring_prod = 0;
786 for (i = 0; i < rx_ring_size; i++) {
787 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
788 BNX2X_ERR("was only able to allocate "
789 "%d rx skbs on queue[%d]\n", i, fp->index);
790 fp->eth_q_stats.rx_skb_alloc_failed++;
791 break;
792 }
793 ring_prod = NEXT_RX_IDX(ring_prod);
794 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
795 WARN_ON(ring_prod <= i);
796 }
797
798 fp->rx_bd_prod = ring_prod;
799 /* Limit the CQE producer by the CQE ring size */
800 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
801 cqe_ring_prod);
802 fp->rx_pkt = fp->rx_calls = 0;
803
804 return i;
805}
806
807static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
808{
809 struct bnx2x *bp = fp->bp;
810 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
 811 MAX_RX_AVAIL/bp->num_queues;
 812
 813 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
814
815 bnx2x_alloc_rx_bds(fp, rx_ring_size);
816
817 /* Warning!
818 * this will generate an interrupt (to the TSTORM)
819 * must only be done after chip is initialized
820 */
821 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
822 fp->rx_sge_prod);
823}
824
825void bnx2x_init_rx_rings(struct bnx2x *bp)
826{
827 int func = BP_FUNC(bp);
828 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
829 ETH_MAX_AGGREGATION_QUEUES_E1H;
830 u16 ring_prod;
831 int i, j;
832
833 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
834 IP_HEADER_ALIGNMENT_PADDING;
835
836 DP(NETIF_MSG_IFUP,
 837 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
 838
839 for_each_queue(bp, j) {
 840 struct bnx2x_fastpath *fp = &bp->fp[j];
841
842 if (!fp->disable_tpa) {
843 for (i = 0; i < max_agg_queues; i++) {
844 fp->tpa_pool[i].skb =
845 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
846 if (!fp->tpa_pool[i].skb) {
847 BNX2X_ERR("Failed to allocate TPA "
848 "skb pool for queue[%d] - "
849 "disabling TPA on this "
850 "queue!\n", j);
851 bnx2x_free_tpa_pool(bp, fp, i);
852 fp->disable_tpa = 1;
853 break;
854 }
855 dma_unmap_addr_set((struct sw_rx_bd *)
856 &bp->fp->tpa_pool[i],
857 mapping, 0);
858 fp->tpa_state[i] = BNX2X_TPA_STOP;
859 }
860
861 /* "next page" elements initialization */
862 bnx2x_set_next_page_sgl(fp);
863
864 /* set SGEs bit mask */
865 bnx2x_init_sge_ring_bit_mask(fp);
866
867 /* Allocate SGEs and initialize the ring elements */
868 for (i = 0, ring_prod = 0;
869 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
870
871 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
872 BNX2X_ERR("was only able to allocate "
873 "%d rx sges\n", i);
874 BNX2X_ERR("disabling TPA for"
875 " queue[%d]\n", j);
876 /* Cleanup already allocated elements */
877 bnx2x_free_rx_sge_range(bp,
878 fp, ring_prod);
879 bnx2x_free_tpa_pool(bp,
880 fp, max_agg_queues);
881 fp->disable_tpa = 1;
882 ring_prod = 0;
883 break;
884 }
885 ring_prod = NEXT_SGE_IDX(ring_prod);
886 }
887
888 fp->rx_sge_prod = ring_prod;
889 }
890 }
891
892 for_each_queue(bp, j) {
893 struct bnx2x_fastpath *fp = &bp->fp[j];
894
895 fp->rx_bd_cons = 0;
896
897 bnx2x_set_next_page_rx_bd(fp);
898
 899 /* CQ ring */
900 bnx2x_set_next_page_rx_cq(fp);
901
 902 /* Allocate BDs and initialize BD ring */
903 bnx2x_alloc_rx_bd_ring(fp);
904
905 if (j != 0)
 906 continue;
 907
908 if (!CHIP_IS_E2(bp)) {
909 REG_WR(bp, BAR_USTRORM_INTMEM +
910 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
911 U64_LO(fp->rx_comp_mapping));
912 REG_WR(bp, BAR_USTRORM_INTMEM +
913 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
914 U64_HI(fp->rx_comp_mapping));
915 }
916 }
 917}
918
919static void bnx2x_free_tx_skbs(struct bnx2x *bp)
920{
921 int i;
922
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
925
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
929
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
932 sw_cons++;
933 }
934 }
935}
936
937static void bnx2x_free_rx_skbs(struct bnx2x *bp)
938{
939 int i, j;
940
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
943
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
947
948 if (skb == NULL)
949 continue;
950
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
954
955 rx_buf->skb = NULL;
956 dev_kfree_skb(skb);
957 }
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
962 }
963}
964
965void bnx2x_free_skbs(struct bnx2x *bp)
966{
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
969}
970
971static void bnx2x_free_msix_irqs(struct bnx2x *bp)
972{
973 int i, offset = 1;
974
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
978
979#ifdef BCM_CNIC
980 offset++;
981#endif
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
986
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
988 }
989}
990
991void bnx2x_free_irq(struct bnx2x *bp)
992{
993 if (bp->flags & USING_MSIX_FLAG)
994 bnx2x_free_msix_irqs(bp);
995 else if (bp->flags & USING_MSI_FLAG)
996 free_irq(bp->pdev->irq, bp->dev);
997 else
998 free_irq(bp->pdev->irq, bp->dev);
999}
1000
1001int bnx2x_enable_msix(struct bnx2x *bp)
1002{
1003 int msix_vec = 0, i, rc, req_cnt;
1004
1005 bp->msix_table[msix_vec].entry = msix_vec;
 1006 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
 1007 bp->msix_table[0].entry);
 1008 msix_vec++;
1009
 1010#ifdef BCM_CNIC
1011 bp->msix_table[msix_vec].entry = msix_vec;
 1012 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
 1013 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
 1014 msix_vec++;
1015#endif
 1016 for_each_queue(bp, i) {
1017 bp->msix_table[msix_vec].entry = msix_vec;
1018 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1019 "(fastpath #%u)\n", msix_vec, msix_vec, i);
 1020 msix_vec++;
1021 }
 1022
1023 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
 1024
 1025 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1026
1027 /*
1028 * reconfigure number of tx/rx queues according to available
1029 * MSI-X vectors
1030 */
1031 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1032 /* how many fewer vectors did we get? */
1033 int diff = req_cnt - rc;
1034
1035 DP(NETIF_MSG_IFUP,
1036 "Trying to use less MSI-X vectors: %d\n", rc);
1037
1038 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1039
1040 if (rc) {
1041 DP(NETIF_MSG_IFUP,
1042 "MSI-X is not attainable rc %d\n", rc);
1043 return rc;
1044 }
1045 /*
 1046 * decrease number of queues by number of unallocated entries
 1047 */
 1048 bp->num_queues -= diff;
1049
1050 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1051 bp->num_queues);
1052 } else if (rc) {
1053 /* fall back to INTx if there is not enough memory */
 1054 if (rc == -ENOMEM)
 1055 bp->flags |= DISABLE_MSI_FLAG;
1056 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1057 return rc;
1058 }
1059
1060 bp->flags |= USING_MSIX_FLAG;
1061
1062 return 0;
1063}
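/* Resulting msix_table layout, as a sketch of the code above (entry
 * numbers assume BCM_CNIC is defined):
 *   entry 0                    - slowpath interrupt
 *   entry 1                    - CNIC
 *   entries 2..num_queues + 1  - one vector per fastpath queue
 * Without BCM_CNIC the fastpath vectors are assumed to start at entry 1.
 */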
1064
1065static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1066{
1067 int i, rc, offset = 1;
1068
1069 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1070 bp->dev->name, bp->dev);
1071 if (rc) {
1072 BNX2X_ERR("request sp irq failed\n");
1073 return -EBUSY;
1074 }
1075
1076#ifdef BCM_CNIC
1077 offset++;
1078#endif
1079 for_each_queue(bp, i) {
1080 struct bnx2x_fastpath *fp = &bp->fp[i];
1081 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1082 bp->dev->name, i);
1083
1084 rc = request_irq(bp->msix_table[offset].vector,
1085 bnx2x_msix_fp_int, 0, fp->name, fp);
1086 if (rc) {
1087 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1088 bnx2x_free_msix_irqs(bp);
1089 return -EBUSY;
1090 }
1091
1092 offset++;
1093 fp->state = BNX2X_FP_STATE_IRQ;
1094 }
1095
1096 i = BNX2X_NUM_QUEUES(bp);
1097 offset = 1 + CNIC_CONTEXT_USE;
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 " ... fp[%d] %d\n",
1100 bp->msix_table[0].vector,
1101 0, bp->msix_table[offset].vector,
1102 i - 1, bp->msix_table[offset + i - 1].vector);
1103
1104 return 0;
1105}
1106
1107int bnx2x_enable_msi(struct bnx2x *bp)
1108{
1109 int rc;
1110
1111 rc = pci_enable_msi(bp->pdev);
1112 if (rc) {
1113 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1114 return -1;
1115 }
1116 bp->flags |= USING_MSI_FLAG;
1117
1118 return 0;
1119}
1120
1121static int bnx2x_req_irq(struct bnx2x *bp)
1122{
1123 unsigned long flags;
1124 int rc;
1125
1126 if (bp->flags & USING_MSI_FLAG)
1127 flags = 0;
1128 else
1129 flags = IRQF_SHARED;
1130
1131 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132 bp->dev->name, bp->dev);
1133 if (!rc)
1134 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1135
1136 return rc;
1137}
1138
1139static void bnx2x_napi_enable(struct bnx2x *bp)
1140{
1141 int i;
1142
1143 for_each_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi));
1145}
1146
1147static void bnx2x_napi_disable(struct bnx2x *bp)
1148{
1149 int i;
1150
1151 for_each_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi));
1153}
1154
1155void bnx2x_netif_start(struct bnx2x *bp)
1156{
1157 int intr_sem;
1158
1159 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1160 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1161
1162 if (intr_sem) {
1163 if (netif_running(bp->dev)) {
1164 bnx2x_napi_enable(bp);
1165 bnx2x_int_enable(bp);
1166 if (bp->state == BNX2X_STATE_OPEN)
1167 netif_tx_wake_all_queues(bp->dev);
1168 }
1169 }
1170}
1171
1172void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173{
1174 bnx2x_int_disable_sync(bp, disable_hw);
1175 bnx2x_napi_disable(bp);
1176 netif_tx_disable(bp->dev);
1177}
1178
1179void bnx2x_set_num_queues(struct bnx2x *bp)
 1180{
 1181 switch (bp->multi_mode) {
 1182 case ETH_RSS_MODE_DISABLED:
1183 bp->num_queues = 1;
1184 break;
 1185 case ETH_RSS_MODE_REGULAR:
 1186 bp->num_queues = bnx2x_calc_num_queues(bp);
1187 break;
1188
1189 default:
1190 bp->num_queues = 1;
1191 break;
 1192 }
1193}
 1194
1195static void bnx2x_release_firmware(struct bnx2x *bp)
1196{
1197 kfree(bp->init_ops_offsets);
1198 kfree(bp->init_ops);
1199 kfree(bp->init_data);
1200 release_firmware(bp->firmware);
1201}
1202
1203/* must be called with rtnl_lock */
1204int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1205{
1206 u32 load_code;
1207 int i, rc;
1208
1209 /* Set init arrays */
1210 rc = bnx2x_init_firmware(bp);
1211 if (rc) {
1212 BNX2X_ERR("Error loading firmware\n");
1213 return rc;
1214 }
1215
1216#ifdef BNX2X_STOP_ON_ERROR
1217 if (unlikely(bp->panic))
1218 return -EPERM;
1219#endif
1220
1221 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1222
1223 /* must be called before memory allocation and HW init */
1224 bnx2x_ilt_set_info(bp);
1225
1226 if (bnx2x_alloc_mem(bp))
1227 return -ENOMEM;
1228
1229 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1230 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1231 if (rc) {
1232 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1233 goto load_error0;
1234 }
1235
1236 for_each_queue(bp, i)
1237 bnx2x_fp(bp, i, disable_tpa) =
1238 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1239
1240 bnx2x_napi_enable(bp);
1241
1242 /* Send LOAD_REQUEST command to MCP
1243 Returns the type of LOAD command:
1244 if it is the first port to be initialized
1245 common blocks should be initialized, otherwise - not
1246 */
1247 if (!BP_NOMCP(bp)) {
1248 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1249 if (!load_code) {
1250 BNX2X_ERR("MCP response failure, aborting\n");
1251 rc = -EBUSY;
1252 goto load_error1;
1253 }
1254 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1255 rc = -EBUSY; /* other port in diagnostic mode */
1256 goto load_error1;
1257 }
1258
1259 } else {
1260 int path = BP_PATH(bp);
1261 int port = BP_PORT(bp);
1262
1263 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1264 path, load_count[path][0], load_count[path][1],
1265 load_count[path][2]);
1266 load_count[path][0]++;
1267 load_count[path][1 + port]++;
1268 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1269 path, load_count[path][0], load_count[path][1],
1270 load_count[path][2]);
1271 if (load_count[path][0] == 1)
1272 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1273 else if (load_count[path][1 + port] == 1)
1274 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1275 else
1276 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1277 }
1278
1279 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1280 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1281 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1282 bp->port.pmf = 1;
1283 else
1284 bp->port.pmf = 0;
1285 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1286
1287 /* Initialize HW */
1288 rc = bnx2x_init_hw(bp, load_code);
1289 if (rc) {
1290 BNX2X_ERR("HW init failed, aborting\n");
1291 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1292 goto load_error2;
1293 }
1294
1295 /* Connect to IRQs */
 1296 rc = bnx2x_setup_irqs(bp);
1297 if (rc) {
1298 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1299 goto load_error2;
1300 }
1301
1302 /* Setup NIC internals and enable interrupts */
1303 bnx2x_nic_init(bp, load_code);
1304
1305 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1306 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1307 (bp->common.shmem2_base))
1308 SHMEM2_WR(bp, dcc_support,
1309 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1310 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1311
1312 /* Send LOAD_DONE command to MCP */
1313 if (!BP_NOMCP(bp)) {
1314 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1315 if (!load_code) {
1316 BNX2X_ERR("MCP response failure, aborting\n");
1317 rc = -EBUSY;
1318 goto load_error3;
1319 }
1320 }
1321
1322 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1323
1324 rc = bnx2x_func_start(bp);
1325 if (rc) {
1326 BNX2X_ERR("Function start failed!\n");
1327#ifndef BNX2X_STOP_ON_ERROR
1328 goto load_error3;
1329#else
1330 bp->panic = 1;
1331 return -EBUSY;
1332#endif
1333 }
1334
1335 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1336 if (rc) {
1337 BNX2X_ERR("Setup leading failed!\n");
1338#ifndef BNX2X_STOP_ON_ERROR
1339 goto load_error3;
1340#else
1341 bp->panic = 1;
1342 return -EBUSY;
1343#endif
1344 }
1345
1346 if (!CHIP_IS_E1(bp) &&
1347 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1348 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1349 bp->flags |= MF_FUNC_DIS;
1350 }
1351
1352#ifdef BCM_CNIC
1353 /* Enable Timer scan */
 1354 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1355#endif
1356
1357 for_each_nondefault_queue(bp, i) {
1358 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1359 if (rc)
1360#ifdef BCM_CNIC
1361 goto load_error4;
1362#else
1363 goto load_error3;
1364#endif
1365 }
1366
1367 /* Now that the clients are configured we are ready to work */
1368 bp->state = BNX2X_STATE_OPEN;
1369
1370 bnx2x_set_eth_mac(bp, 1);
1371
1372 if (bp->port.pmf)
1373 bnx2x_initial_phy_init(bp, load_mode);
1374
1375 /* Start fast path */
1376 switch (load_mode) {
1377 case LOAD_NORMAL:
1378 /* Tx queues need only be re-enabled */
1379 netif_tx_wake_all_queues(bp->dev);
1380 /* Initialize the receive filter. */
1381 bnx2x_set_rx_mode(bp->dev);
1382 break;
1383
1384 case LOAD_OPEN:
1385 netif_tx_start_all_queues(bp->dev);
1386 smp_mb__after_clear_bit();
1387 /* Initialize the receive filter. */
1388 bnx2x_set_rx_mode(bp->dev);
1389 break;
1390
1391 case LOAD_DIAG:
1392 /* Initialize the receive filter. */
1393 bnx2x_set_rx_mode(bp->dev);
1394 bp->state = BNX2X_STATE_DIAG;
1395 break;
1396
1397 default:
1398 break;
1399 }
1400
1401 if (!bp->port.pmf)
1402 bnx2x__link_status_update(bp);
1403
1404 /* start the timer */
1405 mod_timer(&bp->timer, jiffies + bp->current_interval);
1406
1407#ifdef BCM_CNIC
1408 bnx2x_setup_cnic_irq_info(bp);
1409 if (bp->state == BNX2X_STATE_OPEN)
1410 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1411#endif
1412 bnx2x_inc_load_cnt(bp);
1413
1414 bnx2x_release_firmware(bp);
1415
1416 return 0;
1417
1418#ifdef BCM_CNIC
1419load_error4:
1420 /* Disable Timer scan */
1421 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1422#endif
1423load_error3:
1424 bnx2x_int_disable_sync(bp, 1);
1425
1426 /* Free SKBs, SGEs, TPA pool and driver internals */
1427 bnx2x_free_skbs(bp);
1428 for_each_queue(bp, i)
1429 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1430
1431 /* Release IRQs */
1432 bnx2x_free_irq(bp);
1433load_error2:
1434 if (!BP_NOMCP(bp)) {
1435 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1436 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1437 }
1438
1439 bp->port.pmf = 0;
1440load_error1:
 1441 bnx2x_napi_disable(bp);
1442load_error0:
1443 bnx2x_free_mem(bp);
 1444
1445 bnx2x_release_firmware(bp);
 1446
1447 return rc;
1448}
1449
1450/* must be called with rtnl_lock */
1451int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1452{
1453 int i;
1454
1455 if (bp->state == BNX2X_STATE_CLOSED) {
1456 /* Interface has been removed - nothing to recover */
1457 bp->recovery_state = BNX2X_RECOVERY_DONE;
1458 bp->is_leader = 0;
1459 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1460 smp_wmb();
1461
1462 return -EINVAL;
1463 }
1464
1465#ifdef BCM_CNIC
1466 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1467#endif
1468 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1469
1470 /* Set "drop all" */
1471 bp->rx_mode = BNX2X_RX_MODE_NONE;
1472 bnx2x_set_storm_rx_mode(bp);
1473
1474 /* Stop Tx */
 1475 bnx2x_tx_disable(bp);
1476
1477 del_timer_sync(&bp->timer);
1478
1479 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1480 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1481
1482 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1483
1484 /* Cleanup the chip if needed */
1485 if (unload_mode != UNLOAD_RECOVERY)
1486 bnx2x_chip_cleanup(bp, unload_mode);
1487 else {
1488 /* Disable HW interrupts, NAPI and Tx */
1489 bnx2x_netif_stop(bp, 1);
1490
1491 /* Release IRQs */
1492 bnx2x_free_irq(bp);
1493 }
1494
1495 bp->port.pmf = 0;
1496
1497 /* Free SKBs, SGEs, TPA pool and driver internals */
1498 bnx2x_free_skbs(bp);
1499 for_each_queue(bp, i)
1500 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1501
1502 bnx2x_free_mem(bp);
1503
1504 bp->state = BNX2X_STATE_CLOSED;
1505
1506 /* The last driver must disable a "close the gate" if there is no
1507 * parity attention or "process kill" pending.
1508 */
1509 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1510 bnx2x_reset_is_done(bp))
1511 bnx2x_disable_close_the_gate(bp);
1512
1513 /* Reset MCP mail box sequence if there is on going recovery */
1514 if (unload_mode == UNLOAD_RECOVERY)
1515 bp->fw_seq = 0;
1516
1517 return 0;
1518}
1519
1520int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1521{
1522 u16 pmcsr;
1523
1524 /* If there is no power capability, silently succeed */
1525 if (!bp->pm_cap) {
1526 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1527 return 0;
1528 }
1529
1530 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1531
1532 switch (state) {
1533 case PCI_D0:
1534 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1535 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1536 PCI_PM_CTRL_PME_STATUS));
1537
1538 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1539 /* delay required during transition out of D3hot */
1540 msleep(20);
1541 break;
1542
1543 case PCI_D3hot:
1544 /* If there are other clients above don't
1545 shut down the power */
1546 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1547 return 0;
1548 /* Don't shut down the power for emulation and FPGA */
1549 if (CHIP_REV_IS_SLOW(bp))
1550 return 0;
1551
1552 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1553 pmcsr |= 3;
1554
1555 if (bp->wol)
1556 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1557
1558 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1559 pmcsr);
1560
1561 /* No more memory access after this point until
1562 * device is brought back to D0.
1563 */
1564 break;
1565
1566 default:
1567 return -EINVAL;
1568 }
1569 return 0;
1570}
1571
1572/*
1573 * net_device service functions
1574 */
1575int bnx2x_poll(struct napi_struct *napi, int budget)
1576{
1577 int work_done = 0;
1578 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1579 napi);
1580 struct bnx2x *bp = fp->bp;
1581
1582 while (1) {
1583#ifdef BNX2X_STOP_ON_ERROR
1584 if (unlikely(bp->panic)) {
1585 napi_complete(napi);
1586 return 0;
1587 }
1588#endif
1589
1590 if (bnx2x_has_tx_work(fp))
1591 bnx2x_tx_int(fp);
1592
1593 if (bnx2x_has_rx_work(fp)) {
1594 work_done += bnx2x_rx_int(fp, budget - work_done);
1595
1596 /* must not complete if we consumed full budget */
1597 if (work_done >= budget)
1598 break;
1599 }
1600
1601 /* Fall out from the NAPI loop if needed */
1602 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1603 bnx2x_update_fpsb_idx(fp);
1604 /* bnx2x_has_rx_work() reads the status block,
1605 * thus we need to ensure that status block indices
1606 * have been actually read (bnx2x_update_fpsb_idx)
1607 * prior to this check (bnx2x_has_rx_work) so that
1608 * we won't write the "newer" value of the status block
1609 * to IGU (if there was a DMA right after
1610 * bnx2x_has_rx_work and if there is no rmb, the memory
1611 * reading (bnx2x_update_fpsb_idx) may be postponed
1612 * to right before bnx2x_ack_sb). In this case there
1613 * will never be another interrupt until there is
1614 * another update of the status block, while there
1615 * is still unhandled work.
1616 */
1617 rmb();
1618
1619 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1620 napi_complete(napi);
1621 /* Re-enable interrupts */
1622 DP(NETIF_MSG_HW,
1623 "Update index to %d\n", fp->fp_hc_idx);
1624 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1625 le16_to_cpu(fp->fp_hc_idx),
1626 IGU_INT_ENABLE, 1);
1627 break;
1628 }
1629 }
1630 }
1631
1632 return work_done;
1633}
1634
1635/* we split the first BD into headers and data BDs
1636 * to ease the pain of our fellow microcode engineers
1637 * we use one mapping for both BDs
1638 * So far this has only been observed to happen
1639 * in Other Operating Systems(TM)
1640 */
1641static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1642 struct bnx2x_fastpath *fp,
1643 struct sw_tx_bd *tx_buf,
1644 struct eth_tx_start_bd **tx_bd, u16 hlen,
1645 u16 bd_prod, int nbd)
1646{
1647 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1648 struct eth_tx_bd *d_tx_bd;
1649 dma_addr_t mapping;
1650 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1651
1652 /* first fix first BD */
1653 h_tx_bd->nbd = cpu_to_le16(nbd);
1654 h_tx_bd->nbytes = cpu_to_le16(hlen);
1655
1656 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1657 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1658 h_tx_bd->addr_lo, h_tx_bd->nbd);
1659
1660 /* now get a new data BD
1661 * (after the pbd) and fill it */
1662 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1663 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1664
1665 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1666 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1667
1668 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1669 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1670 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1671
1672 /* this marks the BD as one that has no individual mapping */
1673 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1674
1675 DP(NETIF_MSG_TX_QUEUED,
1676 "TSO split data size is %d (%x:%x)\n",
1677 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1678
1679 /* update tx_bd */
1680 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1681
1682 return bd_prod;
1683}
1684
1685static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1686{
1687 if (fix > 0)
1688 csum = (u16) ~csum_fold(csum_sub(csum,
1689 csum_partial(t_header - fix, fix, 0)));
1690
1691 else if (fix < 0)
1692 csum = (u16) ~csum_fold(csum_add(csum,
1693 csum_partial(t_header, -fix, 0)));
1694
1695 return swab16(csum);
1696}
1697
1698static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1699{
1700 u32 rc;
1701
1702 if (skb->ip_summed != CHECKSUM_PARTIAL)
1703 rc = XMIT_PLAIN;
1704
1705 else {
1706 if (skb->protocol == htons(ETH_P_IPV6)) {
1707 rc = XMIT_CSUM_V6;
1708 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1709 rc |= XMIT_CSUM_TCP;
1710
1711 } else {
1712 rc = XMIT_CSUM_V4;
1713 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1714 rc |= XMIT_CSUM_TCP;
1715 }
1716 }
1717
1718 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1719 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1720
1721 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1722 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1723
1724 return rc;
1725}
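/* Example reading of bnx2x_xmit_type() above (a sketch, not an extra
 * code path): a CHECKSUM_PARTIAL TCPv4 frame with TSO yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a packet without
 * checksum offload simply yields XMIT_PLAIN.
 */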
1726
1727#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 1728/* check if the packet requires linearization (i.e. it is too fragmented);
 1729 no need to check fragmentation if the page size > 8K (there will be no
 1730 violation of FW restrictions) */
1731static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1732 u32 xmit_type)
1733{
1734 int to_copy = 0;
1735 int hlen = 0;
1736 int first_bd_sz = 0;
1737
1738 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1739 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1740
1741 if (xmit_type & XMIT_GSO) {
1742 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1743 /* Check if LSO packet needs to be copied:
1744 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1745 int wnd_size = MAX_FETCH_BD - 3;
1746 /* Number of windows to check */
1747 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1748 int wnd_idx = 0;
1749 int frag_idx = 0;
1750 u32 wnd_sum = 0;
1751
1752 /* Headers length */
1753 hlen = (int)(skb_transport_header(skb) - skb->data) +
1754 tcp_hdrlen(skb);
1755
 1756 /* Amount of data (w/o headers) on the linear part of the SKB */
1757 first_bd_sz = skb_headlen(skb) - hlen;
1758
1759 wnd_sum = first_bd_sz;
1760
1761 /* Calculate the first sum - it's special */
1762 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1763 wnd_sum +=
1764 skb_shinfo(skb)->frags[frag_idx].size;
1765
1766 /* If there was data on linear skb data - check it */
1767 if (first_bd_sz > 0) {
1768 if (unlikely(wnd_sum < lso_mss)) {
1769 to_copy = 1;
1770 goto exit_lbl;
1771 }
1772
1773 wnd_sum -= first_bd_sz;
1774 }
1775
1776 /* Others are easier: run through the frag list and
1777 check all windows */
1778 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1779 wnd_sum +=
1780 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1781
1782 if (unlikely(wnd_sum < lso_mss)) {
1783 to_copy = 1;
1784 break;
1785 }
1786 wnd_sum -=
1787 skb_shinfo(skb)->frags[wnd_idx].size;
1788 }
1789 } else {
 1790 /* in the non-LSO case a too-fragmented packet must
 1791 always be linearized */
1792 to_copy = 1;
1793 }
1794 }
1795
1796exit_lbl:
1797 if (unlikely(to_copy))
1798 DP(NETIF_MSG_TX_QUEUED,
1799 "Linearization IS REQUIRED for %s packet. "
1800 "num_frags %d hlen %d first_bd_sz %d\n",
1801 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1802 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1803
1804 return to_copy;
1805}
1806#endif
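/* A worked example of the sliding-window check above (hypothetical sizes):
 * with a window of wnd_size = 10 BDs and an MSS of 1448, a GSO skb whose
 * linear part carries 200 bytes of payload followed by twelve 100-byte
 * frags fails the very first window (200 + 9 * 100 = 1100 < 1448), so
 * to_copy is set and the skb gets linearized.
 */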
1807
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001808static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1809 struct eth_tx_parse_bd_e2 *pbd,
1810 u32 xmit_type)
1811{
1812 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1813 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1814 if ((xmit_type & XMIT_GSO_V6) &&
1815 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1816 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1817}
1818
 1819/**
 1820 * Update the parsing BD (E1x) in the GSO case.
 1821 *
 1822 * @param skb
 1824 * @param pbd
 1825 * @param xmit_type
 1826 */
1827static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1828 struct eth_tx_parse_bd_e1x *pbd,
1829 u32 xmit_type)
1830{
1831 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1832 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1833 pbd->tcp_flags = pbd_tcp_flags(skb);
1834
1835 if (xmit_type & XMIT_GSO_V4) {
1836 pbd->ip_id = swab16(ip_hdr(skb)->id);
1837 pbd->tcp_pseudo_csum =
1838 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1839 ip_hdr(skb)->daddr,
1840 0, IPPROTO_TCP, 0));
1841
1842 } else
1843 pbd->tcp_pseudo_csum =
1844 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1845 &ipv6_hdr(skb)->daddr,
1846 0, IPPROTO_TCP, 0));
1847
1848 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1849}
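/* For example, an IPv4 TSO skb programs lso_mss from gso_size, copies the
 * TCP sequence number and flags, and stores a pseudo-header checksum
 * computed with a zero length field, which is why the FW is told via
 * ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN that the length is not
 * included.
 */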
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001850
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001851/**
 1852 * Set the parsing BD (E2) for checksum offload.
 1853 * @param bp
 1854 * @param skb
 1855 * @param pbd
 1856 * @param xmit_type
 1857 *
 1858 * @return header length in bytes
 1859 */
1860static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1861 struct eth_tx_parse_bd_e2 *pbd,
1862 u32 xmit_type)
1863{
1864 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1865 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1866
1867 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1868 skb->data) / 2) <<
1869 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1870
1871 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1872}
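/* A worked example (hypothetical frame): for an untagged IPv4 packet whose
 * 20-byte TCP header starts 34 bytes into the skb, the code above stores 5
 * (TCP header length in dwords) and 17 (TCP header start offset in words)
 * into parsing_data and returns 54, the full header length in bytes.
 */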
1873
 1874/**
 1875 * Set the parsing BD (E1x) for checksum offload.
 1876 * @param bp
 1877 * @param skb
 1878 * @param pbd
 1879 * @param xmit_type
 1880 *
 1881 * @return Header length in bytes
 1882 */
1883static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1884 struct eth_tx_parse_bd_e1x *pbd,
1885 u32 xmit_type)
1886{
1887 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1888
1889 /* for now NS flag is not used in Linux */
1890 pbd->global_data =
1891 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1892 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1893
1894 pbd->ip_hlen_w = (skb_transport_header(skb) -
1895 skb_network_header(skb)) / 2;
1896
1897 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1898
1899 pbd->total_hlen_w = cpu_to_le16(hlen);
1900 hlen = hlen*2;
1901
1902 if (xmit_type & XMIT_CSUM_TCP) {
1903 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1904
1905 } else {
1906 s8 fix = SKB_CS_OFF(skb); /* signed! */
1907
1908 DP(NETIF_MSG_TX_QUEUED,
1909 "hlen %d fix %d csum before fix %x\n",
1910 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1911
1912 /* HW bug: fixup the CSUM */
1913 pbd->tcp_pseudo_csum =
1914 bnx2x_csum_fix(skb_transport_header(skb),
1915 SKB_CS(skb), fix);
1916
1917 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1918 pbd->tcp_pseudo_csum);
1919 }
1920
1921 return hlen;
1922}
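/* A worked example of the word arithmetic above (compiled out, hypothetical
 * frame): an untagged IPv4/TCP packet with 14-byte Ethernet, 20-byte IP and
 * 20-byte TCP headers yields total_hlen_w = 27 words and a return value of
 * 54 bytes.
 */
#if 0
static void bnx2x_pbd_csum_example(void)
{
	u16 eth_hlen = 14, ip_hlen = 20, tcp_hlen = 20;
	u16 hlen_w = eth_hlen / 2 + ip_hlen / 2 + tcp_hlen / 2;

	WARN_ON(hlen_w != 27);		/* total_hlen_w */
	WARN_ON(hlen_w * 2 != 54);	/* header length returned in bytes */
}
#endif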
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001923
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001924/* called with netif_tx_lock
1925 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1926 * netif_wake_queue()
1927 */
1928netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1929{
1930 struct bnx2x *bp = netdev_priv(dev);
1931 struct bnx2x_fastpath *fp;
1932 struct netdev_queue *txq;
1933 struct sw_tx_bd *tx_buf;
1934 struct eth_tx_start_bd *tx_start_bd;
1935 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001936 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001937 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001938 u16 pkt_prod, bd_prod;
1939 int nbd, fp_index;
1940 dma_addr_t mapping;
1941 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1942 int i;
1943 u8 hlen = 0;
1944 __le16 pkt_size = 0;
1945 struct ethhdr *eth;
1946 u8 mac_type = UNICAST_ADDRESS;
1947
1948#ifdef BNX2X_STOP_ON_ERROR
1949 if (unlikely(bp->panic))
1950 return NETDEV_TX_BUSY;
1951#endif
1952
1953 fp_index = skb_get_queue_mapping(skb);
1954 txq = netdev_get_tx_queue(dev, fp_index);
1955
1956 fp = &bp->fp[fp_index];
1957
1958 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1959 fp->eth_q_stats.driver_xoff++;
1960 netif_tx_stop_queue(txq);
1961 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1962 return NETDEV_TX_BUSY;
1963 }
1964
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001965 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1966 "protocol(%x,%x) gso type %x xmit_type %x\n",
1967 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001968 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1969
1970 eth = (struct ethhdr *)skb->data;
1971
1972 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1973 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1974 if (is_broadcast_ether_addr(eth->h_dest))
1975 mac_type = BROADCAST_ADDRESS;
1976 else
1977 mac_type = MULTICAST_ADDRESS;
1978 }
1979
1980#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1981 /* First, check if we need to linearize the skb (due to FW
1982 restrictions). No need to check fragmentation if page size > 8K
1983 (there will be no violation to FW restrictions) */
1984 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1985 /* Statistics of linearization */
1986 bp->lin_cnt++;
1987 if (skb_linearize(skb) != 0) {
1988 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1989 "silently dropping this SKB\n");
1990 dev_kfree_skb_any(skb);
1991 return NETDEV_TX_OK;
1992 }
1993 }
1994#endif
1995
1996 /*
1997 Please read carefully. First we use one BD which we mark as start,
1998 then we have a parsing info BD (used for TSO or xsum),
1999 and only then we have the rest of the TSO BDs.
2000 (don't forget to mark the last one as last,
2001 and to unmap only AFTER you write to the BD ...)
 2002 And above all, all PBD sizes are in words - NOT DWORDS!
2003 */
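	/* An illustration of the chain assembled below for a TSO skb with
	 * N frags:
	 *   start BD -> parsing BD (E1x or E2) -> [optional TSO-split
	 *   data BD] -> N frag BDs
	 * nbd starts at N + 2 and is bumped once for the split BD and once
	 * more, just before the doorbell, for the "next BD" case described
	 * further down.
	 */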
2004
2005 pkt_prod = fp->tx_pkt_prod++;
2006 bd_prod = TX_BD(fp->tx_bd_prod);
2007
2008 /* get a tx_buf and first BD */
2009 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2010 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2011
2012 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002013 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2014 mac_type);
2015
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002016 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002017 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002018
2019 /* remember the first BD of the packet */
2020 tx_buf->first_bd = fp->tx_bd_prod;
2021 tx_buf->skb = skb;
2022 tx_buf->flags = 0;
2023
2024 DP(NETIF_MSG_TX_QUEUED,
2025 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2026 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2027
2028#ifdef BCM_VLAN
Jesse Grosseab6d182010-10-20 13:56:03 +00002029 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002030 tx_start_bd->vlan_or_ethertype =
2031 cpu_to_le16(vlan_tx_tag_get(skb));
2032 tx_start_bd->bd_flags.as_bitfield |=
2033 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002034 } else
2035#endif
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002036 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002037
2038 /* turn on parsing and get a BD */
2039 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002040
2041 if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002042 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2043
2044 if (xmit_type & XMIT_CSUM_V4)
2045 tx_start_bd->bd_flags.as_bitfield |=
2046 ETH_TX_BD_FLAGS_IP_CSUM;
2047 else
2048 tx_start_bd->bd_flags.as_bitfield |=
2049 ETH_TX_BD_FLAGS_IPV6;
2050
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002051 if (!(xmit_type & XMIT_CSUM_TCP))
2052 tx_start_bd->bd_flags.as_bitfield |=
2053 ETH_TX_BD_FLAGS_IS_UDP;
2054 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002055
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002056 if (CHIP_IS_E2(bp)) {
2057 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2058 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2059 /* Set PBD in checksum offload case */
2060 if (xmit_type & XMIT_CSUM)
2061 hlen = bnx2x_set_pbd_csum_e2(bp,
2062 skb, pbd_e2, xmit_type);
2063 } else {
2064 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2065 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2066 /* Set PBD in checksum offload case */
2067 if (xmit_type & XMIT_CSUM)
2068 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002069
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002070 }
2071
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002072 /* Map skb linear data for DMA */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002073 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2074 skb_headlen(skb), DMA_TO_DEVICE);
2075
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002076 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002077 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2078 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2079 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2080 tx_start_bd->nbd = cpu_to_le16(nbd);
2081 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2082 pkt_size = tx_start_bd->nbytes;
2083
2084 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2085 " nbytes %d flags %x vlan %x\n",
2086 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2087 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002088 tx_start_bd->bd_flags.as_bitfield,
2089 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002090
2091 if (xmit_type & XMIT_GSO) {
2092
2093 DP(NETIF_MSG_TX_QUEUED,
2094 "TSO packet len %d hlen %d total len %d tso size %d\n",
2095 skb->len, hlen, skb_headlen(skb),
2096 skb_shinfo(skb)->gso_size);
2097
2098 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2099
2100 if (unlikely(skb_headlen(skb) > hlen))
2101 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2102 hlen, bd_prod, ++nbd);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002103 if (CHIP_IS_E2(bp))
2104 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2105 else
2106 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002107 }
2108 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2109
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002110 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002111 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2112 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2113
2114 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2115 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2116 if (total_pkt_bd == NULL)
2117 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2118
2119 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2120 frag->page_offset,
2121 frag->size, DMA_TO_DEVICE);
2122
2123 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2124 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2125 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2126 le16_add_cpu(&pkt_size, frag->size);
2127
2128 DP(NETIF_MSG_TX_QUEUED,
2129 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2130 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2131 le16_to_cpu(tx_data_bd->nbytes));
2132 }
2133
2134 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2135
2136 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2137
2138 /* now send a tx doorbell, counting the next BD
2139 * if the packet contains or ends with it
2140 */
2141 if (TX_BD_POFF(bd_prod) < nbd)
2142 nbd++;
2143
2144 if (total_pkt_bd != NULL)
2145 total_pkt_bd->total_pkt_bytes = pkt_size;
2146
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002147 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002148 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002149 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002150 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002151 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2152 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2153 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2154 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002155 if (pbd_e2)
2156 DP(NETIF_MSG_TX_QUEUED,
2157 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2158 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2159 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2160 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2161 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002162 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2163
2164 /*
2165 * Make sure that the BD data is updated before updating the producer
2166 * since FW might read the BD right after the producer is updated.
2167 * This is only applicable for weak-ordered memory model archs such
 2168 * as IA-64. The following barrier is also mandatory since the FW
 2169 * assumes packets must have BDs.
2170 */
2171 wmb();
2172
2173 fp->tx_db.data.prod += nbd;
2174 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002175
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002176 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002177
2178 mmiowb();
2179
2180 fp->tx_bd_prod += nbd;
2181
2182 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2183 netif_tx_stop_queue(txq);
2184
2185 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2186 * ordering of set_bit() in netif_tx_stop_queue() and read of
 2187 * fp->tx_bd_cons */
2188 smp_mb();
2189
2190 fp->eth_q_stats.driver_xoff++;
2191 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2192 netif_tx_wake_queue(txq);
2193 }
2194 fp->tx_pkt++;
2195
2196 return NETDEV_TX_OK;
2197}
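/* A note on the stop/wake sequence above: the queue is stopped first and
 * tx_avail is re-read only after smp_mb().  Without the barrier and the
 * re-check, a concurrent bnx2x_tx_int() could free BDs between the
 * availability test and netif_tx_stop_queue(), and the queue might stay
 * stopped even though room is available.
 */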
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002198
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002199/* called with rtnl_lock */
2200int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2201{
2202 struct sockaddr *addr = p;
2203 struct bnx2x *bp = netdev_priv(dev);
2204
2205 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2206 return -EINVAL;
2207
2208 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002209 if (netif_running(dev))
2210 bnx2x_set_eth_mac(bp, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002211
2212 return 0;
2213}
2214
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002215
2216int bnx2x_setup_irqs(struct bnx2x *bp)
2217{
2218 int rc = 0;
2219 if (bp->flags & USING_MSIX_FLAG) {
2220 rc = bnx2x_req_msix_irqs(bp);
2221 if (rc)
2222 return rc;
2223 } else {
2224 bnx2x_ack_int(bp);
2225 rc = bnx2x_req_irq(bp);
2226 if (rc) {
2227 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2228 return rc;
2229 }
2230 if (bp->flags & USING_MSI_FLAG) {
2231 bp->dev->irq = bp->pdev->irq;
2232 netdev_info(bp->dev, "using MSI IRQ %d\n",
2233 bp->pdev->irq);
2234 }
2235 }
2236
2237 return 0;
2238}
2239
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002240void bnx2x_free_mem_bp(struct bnx2x *bp)
2241{
2242 kfree(bp->fp);
2243 kfree(bp->msix_table);
2244 kfree(bp->ilt);
2245}
2246
2247int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2248{
2249 struct bnx2x_fastpath *fp;
2250 struct msix_entry *tbl;
2251 struct bnx2x_ilt *ilt;
2252
2253 /* fp array */
2254 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2255 if (!fp)
2256 goto alloc_err;
2257 bp->fp = fp;
2258
2259 /* msix table */
2260 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2261 GFP_KERNEL);
2262 if (!tbl)
2263 goto alloc_err;
2264 bp->msix_table = tbl;
2265
2266 /* ilt */
2267 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2268 if (!ilt)
2269 goto alloc_err;
2270 bp->ilt = ilt;
2271
2272 return 0;
2273alloc_err:
2274 bnx2x_free_mem_bp(bp);
2275 return -ENOMEM;
2276
2277}
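/* Note: a single error label suffices because kfree(NULL) is a no-op, so
 * bnx2x_free_mem_bp() is safe to call regardless of which of the three
 * allocations failed.
 */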
2278
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002279/* called with rtnl_lock */
2280int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2281{
2282 struct bnx2x *bp = netdev_priv(dev);
2283 int rc = 0;
2284
2285 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2286 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2287 return -EAGAIN;
2288 }
2289
2290 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2291 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2292 return -EINVAL;
2293
2294 /* This does not race with packet allocation
2295 * because the actual alloc size is
2296 * only updated as part of load
2297 */
2298 dev->mtu = new_mtu;
2299
2300 if (netif_running(dev)) {
2301 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2302 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2303 }
2304
2305 return rc;
2306}
2307
2308void bnx2x_tx_timeout(struct net_device *dev)
2309{
2310 struct bnx2x *bp = netdev_priv(dev);
2311
2312#ifdef BNX2X_STOP_ON_ERROR
2313 if (!bp->panic)
2314 bnx2x_panic();
2315#endif
 2316 /* This allows the netif to be shut down gracefully before resetting */
2317 schedule_delayed_work(&bp->reset_task, 0);
2318}
2319
2320#ifdef BCM_VLAN
2321/* called with rtnl_lock */
2322void bnx2x_vlan_rx_register(struct net_device *dev,
2323 struct vlan_group *vlgrp)
2324{
2325 struct bnx2x *bp = netdev_priv(dev);
2326
2327 bp->vlgrp = vlgrp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002328}
2329
2330#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002331
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002332int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2333{
2334 struct net_device *dev = pci_get_drvdata(pdev);
2335 struct bnx2x *bp;
2336
2337 if (!dev) {
2338 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2339 return -ENODEV;
2340 }
2341 bp = netdev_priv(dev);
2342
2343 rtnl_lock();
2344
2345 pci_save_state(pdev);
2346
2347 if (!netif_running(dev)) {
2348 rtnl_unlock();
2349 return 0;
2350 }
2351
2352 netif_device_detach(dev);
2353
2354 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2355
2356 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2357
2358 rtnl_unlock();
2359
2360 return 0;
2361}
2362
2363int bnx2x_resume(struct pci_dev *pdev)
2364{
2365 struct net_device *dev = pci_get_drvdata(pdev);
2366 struct bnx2x *bp;
2367 int rc;
2368
2369 if (!dev) {
2370 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2371 return -ENODEV;
2372 }
2373 bp = netdev_priv(dev);
2374
2375 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2376 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2377 return -EAGAIN;
2378 }
2379
2380 rtnl_lock();
2381
2382 pci_restore_state(pdev);
2383
2384 if (!netif_running(dev)) {
2385 rtnl_unlock();
2386 return 0;
2387 }
2388
2389 bnx2x_set_power_state(bp, PCI_D0);
2390 netif_device_attach(dev);
2391
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002392 /* Since the chip was reset, clear the FW sequence number */
2393 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002394 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2395
2396 rtnl_unlock();
2397
2398 return rc;
2399}