/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

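	/* Illustrative example (hypothetical numbers): with 8 ETH queues and
	 * max_cos == 3, moving the FCoE fastpath from index 8 to index 6
	 * (delta == 2) moves its txdata slot from 8 * 3 + FCOE_TXQ_IDX_OFFSET
	 * down to 6 * 3 + FCOE_TXQ_IDX_OFFSET, i.e. by max_cos * delta = 6.
	 */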
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[old_txdata_index],
	       &bp->bnx2x_txq[new_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
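	/* Advance the producer only over mask elements whose SGEs have all
	 * been consumed: each such 64-bit element is re-armed to
	 * BIT_VEC64_ELEM_ONE_MASK and contributes BIT_VEC64_ELEM_SZ entries
	 * to the producer delta; stop at the first partially used element.
	 */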
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

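	/* TPA start parks the just-received buffer (the first packet of the
	 * aggregation) in the per-queue tpa_info and immediately re-arms the
	 * producer slot with the spare buffer kept in the aggregation pool.
	 */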
	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

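	/* Worked example (typical header sizes): IPv4 + TCP with timestamps
	 * gives 14 (ETH_HLEN) + 20 (struct iphdr) + 20 (struct tcphdr) + 12 =
	 * 66 bytes of headers, so the MSS estimate is len_on_bd - 66.
	 */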
	return len_on_bd - hdrs_len;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
				(GET_FLAG(tpa_info->parsing_flags,
					  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
				 PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
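		/* In LRO mode the whole SGE page goes in as a single frag; in
		 * GRO mode the page is carved into gro_size-sized frags (so
		 * the stack sees MSS-sized chunks) and an extra page
		 * reference is taken for every additional frag on the page.
		 */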
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

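	/* The buffer parked at TPA_START is handed to the stack via
	 * build_skb() only if a replacement data buffer can be allocated
	 * below; otherwise the aggregation is dropped and the old buffer
	 * stays in the bin, so the pool never ends up empty.
	 */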
	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, 0);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

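	/* The buffer is a plain kmalloc() area: NET_SKB_PAD bytes of headroom
	 * followed by rx_buf_size bytes of packet space. Only the packet part
	 * is DMA-mapped; the headroom lets the buffer be wrapped later with
	 * build_skb() without copying.
	 */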
	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

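	/* Each completion is one of three kinds: a slow-path event, a TPA
	 * start/stop marker, or a regular fast-path packet; the loop below
	 * dispatches accordingly.
	 */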
	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, 0);
				if (unlikely(!skb)) {
					kfree(data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current line speed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
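		/* maxCfg is interpreted per MF mode: in SI mode it is a
		 * percentage of the physical line speed, in SD mode it is an
		 * absolute limit in 100 Mbps units. E.g. maxCfg == 50 on a
		 * 10000 Mbps link yields 5000 Mbps in both cases.
		 */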
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

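	/* The last entries of each SGE ring page form a "next page" pointer
	 * (the FW treats such an element as consuming two regular entries,
	 * cf. the comment in bnx2x_fill_frag_skb()); point each one at the
	 * following page so the ring chains and wraps around.
	 */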
	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev,
						    txdata->txq_index));
		}
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

Dmitry Kravkovca924292011-06-14 01:33:08 +00001344 if (nvecs == offset)
1345 return;
1346 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001347 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001348 bp->msix_table[offset].vector);
1349 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001350#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001351 if (nvecs == offset)
1352 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001353 offset++;
1354#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001355
Dmitry Kravkovca924292011-06-14 01:33:08 +00001356 for_each_eth_queue(bp, i) {
1357 if (nvecs == offset)
1358 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001359 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1360 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001361
1362 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001363 }
1364}
1365
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001366void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001367{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001368 if (bp->flags & USING_MSIX_FLAG &&
1369 !(bp->flags & USING_SINGLE_MSIX_FLAG))
Dmitry Kravkovca924292011-06-14 01:33:08 +00001370 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001371 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001372 else
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001373 free_irq(bp->dev->irq, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001374}
1375
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001376int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001377{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001378 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001379
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001380 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001381 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001382 bp->msix_table[0].entry);
1383 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001384
1385#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001386 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001387 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001388 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1389 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001390#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001391 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001392 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001393 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001394 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1395 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001396 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001397 }
1398
Ariel Elior6383c0b2011-07-14 08:31:57 +00001399 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001400
1401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001402
1403 /*
1404 * reconfigure number of tx/rx queues according to available
1405 * MSI-X vectors
1406 */
1407 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001408		/* how many fewer vectors did we get? */
1409 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001410
Merav Sicron51c1a582012-03-18 10:33:38 +00001411 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001412
1413 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1414
1415 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001416 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1417 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001418 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001419 /*
1420 * decrease number of queues by number of unallocated entries
1421 */
1422 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001423
Merav Sicron51c1a582012-03-18 10:33:38 +00001424 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001425 bp->num_queues);
1426 } else if (rc > 0) {
1427 /* Get by with single vector */
1428 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1429 if (rc) {
1430 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1431 rc);
1432 goto no_msix;
1433 }
1434
1435 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1436 bp->flags |= USING_SINGLE_MSIX_FLAG;
1437
1438 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001439 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001440 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001441 }
1442
1443 bp->flags |= USING_MSIX_FLAG;
1444
1445 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001446
1447no_msix:
 1448	/* fall back to INTx if there is not enough memory */
1449 if (rc == -ENOMEM)
1450 bp->flags |= DISABLE_MSI_FLAG;
1451
1452 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001453}
1454
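/* Request the slowpath MSI-X vector, skip the CNIC slot when it is compiled
 * in, then request one vector per ETH fastpath queue. On failure, the vectors
 * acquired so far are released again.
 */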
1455static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1456{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001457 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001458
Dmitry Kravkovca924292011-06-14 01:33:08 +00001459 rc = request_irq(bp->msix_table[offset++].vector,
1460 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001461 bp->dev->name, bp->dev);
1462 if (rc) {
1463 BNX2X_ERR("request sp irq failed\n");
1464 return -EBUSY;
1465 }
1466
1467#ifdef BCM_CNIC
1468 offset++;
1469#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001470 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001471 struct bnx2x_fastpath *fp = &bp->fp[i];
1472 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1473 bp->dev->name, i);
1474
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001475 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001476 bnx2x_msix_fp_int, 0, fp->name, fp);
1477 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001478 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1479 bp->msix_table[offset].vector, rc);
1480 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001481 return -EBUSY;
1482 }
1483
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001484 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001485 }
1486
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001487 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001488 offset = 1 + CNIC_PRESENT;
Merav Sicron51c1a582012-03-18 10:33:38 +00001489 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001490 bp->msix_table[0].vector,
1491 0, bp->msix_table[offset].vector,
1492 i - 1, bp->msix_table[offset + i - 1].vector);
1493
1494 return 0;
1495}
1496
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001497int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001498{
1499 int rc;
1500
1501 rc = pci_enable_msi(bp->pdev);
1502 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001503 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001504 return -1;
1505 }
1506 bp->flags |= USING_MSI_FLAG;
1507
1508 return 0;
1509}
1510
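/* Request the single device interrupt: MSI-X vector 0, the MSI vector or a
 * shared legacy INTx line, depending on which flags were set at enable time.
 */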
1511static int bnx2x_req_irq(struct bnx2x *bp)
1512{
1513 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001514 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001515
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001516 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001517 flags = 0;
1518 else
1519 flags = IRQF_SHARED;
1520
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001521 if (bp->flags & USING_MSIX_FLAG)
1522 irq = bp->msix_table[0].vector;
1523 else
1524 irq = bp->pdev->irq;
1525
1526 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001527}
1528
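/* Hook up the device interrupts: per-queue vectors when running in
 * multi-vector MSI-X mode, otherwise a single MSI/MSI-X/INTx IRQ.
 */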
Eric Dumazet1191cb82012-04-27 21:39:21 +00001529static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001530{
1531 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001532 if (bp->flags & USING_MSIX_FLAG &&
1533 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001534 rc = bnx2x_req_msix_irqs(bp);
1535 if (rc)
1536 return rc;
1537 } else {
1538 bnx2x_ack_int(bp);
1539 rc = bnx2x_req_irq(bp);
1540 if (rc) {
1541 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1542 return rc;
1543 }
1544 if (bp->flags & USING_MSI_FLAG) {
1545 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001546 netdev_info(bp->dev, "using MSI IRQ %d\n",
1547 bp->dev->irq);
1548 }
1549 if (bp->flags & USING_MSIX_FLAG) {
1550 bp->dev->irq = bp->msix_table[0].vector;
 1551			netdev_info(bp->dev, "using MSI-X IRQ %d\n",
1552 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001553 }
1554 }
1555
1556 return 0;
1557}
1558
Eric Dumazet1191cb82012-04-27 21:39:21 +00001559static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001560{
1561 int i;
1562
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001563 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001564 napi_enable(&bnx2x_fp(bp, i, napi));
1565}
1566
Eric Dumazet1191cb82012-04-27 21:39:21 +00001567static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001568{
1569 int i;
1570
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001571 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001572 napi_disable(&bnx2x_fp(bp, i, napi));
1573}
1574
1575void bnx2x_netif_start(struct bnx2x *bp)
1576{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001577 if (netif_running(bp->dev)) {
1578 bnx2x_napi_enable(bp);
1579 bnx2x_int_enable(bp);
1580 if (bp->state == BNX2X_STATE_OPEN)
1581 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001582 }
1583}
1584
1585void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1586{
1587 bnx2x_int_disable_sync(bp, disable_hw);
1588 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001589}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001590
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001591u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1592{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001593 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001594
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001595#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001596 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001597 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1598 u16 ether_type = ntohs(hdr->h_proto);
1599
1600 /* Skip VLAN tag if present */
1601 if (ether_type == ETH_P_8021Q) {
1602 struct vlan_ethhdr *vhdr =
1603 (struct vlan_ethhdr *)skb->data;
1604
1605 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1606 }
1607
1608 /* If ethertype is FCoE or FIP - use FCoE ring */
1609 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001610 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001611 }
1612#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001613 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001614 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001615}
1616
Dmitry Kravkov96305232012-04-03 18:41:30 +00001617
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001618void bnx2x_set_num_queues(struct bnx2x *bp)
1619{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001620 /* RSS queues */
1621 bp->num_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001622
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001623#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00001624 /* override in STORAGE SD modes */
1625 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001626 bp->num_queues = 1;
1627#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001628 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001629 bp->num_queues += NON_ETH_CONTEXT_USE;
Merav Sicron65565882012-06-19 07:48:26 +00001630
1631 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001632}
1633
David S. Miller823dcd22011-08-20 10:39:12 -07001634/**
1635 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1636 *
1637 * @bp: Driver handle
1638 *
 1639 * We currently support at most 16 Tx queues for each CoS, thus we will
1640 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1641 * bp->max_cos.
1642 *
1643 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1644 * index after all ETH L2 indices.
1645 *
1646 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1647 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1648 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1649 *
1650 * The proper configuration of skb->queue_mapping is handled by
1651 * bnx2x_select_queue() and __skb_tx_hash().
1652 *
1653 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1654 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1655 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001656static int bnx2x_set_real_num_queues(struct bnx2x *bp)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001657{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001658 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001659
Merav Sicron65565882012-06-19 07:48:26 +00001660 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1661 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001662
 1663/* account for the FCoE queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001664#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001665 if (!NO_FCOE(bp)) {
1666 rx += FCOE_PRESENT;
1667 tx += FCOE_PRESENT;
1668 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001669#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001670
1671 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1672 if (rc) {
1673 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1674 return rc;
1675 }
1676 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1677 if (rc) {
1678 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1679 return rc;
1680 }
1681
Merav Sicron51c1a582012-03-18 10:33:38 +00001682 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001683 tx, rx);
1684
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001685 return rc;
1686}
1687
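/* Derive the Rx buffer size of every queue from the device MTU (or the
 * mini-jumbo MTU for the FCoE ring) plus FW alignment and padding overhead.
 */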
Eric Dumazet1191cb82012-04-27 21:39:21 +00001688static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001689{
1690 int i;
1691
1692 for_each_queue(bp, i) {
1693 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001694 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001695
1696 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1697 if (IS_FCOE_IDX(i))
1698 /*
 1699			 * Although no IP frames are expected to arrive on
 1700			 * this ring, we still want to add
 1701			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
 1702			 * overrun attack.
1703 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001704 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001705 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001706 mtu = bp->dev->mtu;
1707 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1708 IP_HEADER_ALIGNMENT_PADDING +
1709 ETH_OVREHEAD +
1710 mtu +
1711 BNX2X_FW_RX_ALIGN_END;
 1712		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001713 }
1714}
1715
Eric Dumazet1191cb82012-04-27 21:39:21 +00001716static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001717{
1718 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001719 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1720
Dmitry Kravkov96305232012-04-03 18:41:30 +00001721	/* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001722 * enabled
1723 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001724 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1725 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001726 bp->fp->cl_id +
1727 ethtool_rxfh_indir_default(i, num_eth_queues);
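	/* For example (assumed values): with 4 ETH queues and cl_id = 16 the
	 * table becomes 16, 17, 18, 19, 16, 17, ... since
	 * ethtool_rxfh_indir_default(i, n) simply returns i % n.
	 */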
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001728
1729 /*
1730 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1731	 * per-port, so if explicit configuration is needed, do it only
1732 * for a PMF.
1733 *
1734 * For 57712 and newer on the other hand it's a per-function
1735 * configuration.
1736 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001737 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001738}
1739
Dmitry Kravkov96305232012-04-03 18:41:30 +00001740int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001741 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001742{
Yuval Mintz3b603062012-03-18 10:33:39 +00001743 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001744 int i;
1745
 1746	/* Although RSS is meaningless when there is a single HW queue, we
1747 * still need it enabled in order to have HW Rx hash generated.
1748 *
1749 * if (!is_eth_multi(bp))
1750 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1751 */
1752
Dmitry Kravkov96305232012-04-03 18:41:30 +00001753 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001754
1755 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1756
Dmitry Kravkov96305232012-04-03 18:41:30 +00001757 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001758
Dmitry Kravkov96305232012-04-03 18:41:30 +00001759 /* RSS configuration */
1760 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1761 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1762 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1763 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001764 if (rss_obj->udp_rss_v4)
1765 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1766 if (rss_obj->udp_rss_v6)
1767 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001768
Dmitry Kravkov96305232012-04-03 18:41:30 +00001769 /* Hash bits */
1770 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001771
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001772 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001773
Dmitry Kravkov96305232012-04-03 18:41:30 +00001774 if (config_hash) {
1775 /* RSS keys */
1776 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1777 params.rss_key[i] = random32();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001778
Dmitry Kravkov96305232012-04-03 18:41:30 +00001779 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001780 }
1781
1782 return bnx2x_config_rss(bp, &params);
1783}
1784
Eric Dumazet1191cb82012-04-27 21:39:21 +00001785static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001786{
Yuval Mintz3b603062012-03-18 10:33:39 +00001787 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001788
1789 /* Prepare parameters for function state transitions */
1790 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1791
1792 func_params.f_obj = &bp->func_obj;
1793 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1794
1795 func_params.params.hw_init.load_phase = load_code;
1796
1797 return bnx2x_func_state_change(bp, &func_params);
1798}
1799
1800/*
 1801 * Cleans the objects that have internal lists without sending
 1802 * ramrods. Should be run when interrupts are disabled.
1803 */
1804static void bnx2x_squeeze_objects(struct bnx2x *bp)
1805{
1806 int rc;
1807 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00001808 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00001809 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001810
1811 /***************** Cleanup MACs' object first *************************/
1812
 1813	/* Wait for completion of the requested commands */
1814 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1815 /* Perform a dry cleanup */
1816 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1817
1818 /* Clean ETH primary MAC */
1819 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00001820 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001821 &ramrod_flags);
1822 if (rc != 0)
1823 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1824
1825 /* Cleanup UC list */
1826 vlan_mac_flags = 0;
1827 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1828 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1829 &ramrod_flags);
1830 if (rc != 0)
1831 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1832
1833 /***************** Now clean mcast object *****************************/
1834 rparam.mcast_obj = &bp->mcast_obj;
1835 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1836
1837 /* Add a DEL command... */
1838 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1839 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00001840 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1841 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001842
1843 /* ...and wait until all pending commands are cleared */
1844 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1845 while (rc != 0) {
1846 if (rc < 0) {
1847 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1848 rc);
1849 return;
1850 }
1851
1852 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1853 }
1854}
1855
1856#ifndef BNX2X_STOP_ON_ERROR
1857#define LOAD_ERROR_EXIT(bp, label) \
1858 do { \
1859 (bp)->state = BNX2X_STATE_ERROR; \
1860 goto label; \
1861 } while (0)
1862#else
1863#define LOAD_ERROR_EXIT(bp, label) \
1864 do { \
1865 (bp)->state = BNX2X_STATE_ERROR; \
1866 (bp)->panic = 1; \
1867 return -EBUSY; \
1868 } while (0)
1869#endif
1870
Yuval Mintz452427b2012-03-26 20:47:07 +00001871bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1872{
1873 /* build FW version dword */
1874 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1875 (BCM_5710_FW_MINOR_VERSION << 8) +
1876 (BCM_5710_FW_REVISION_VERSION << 16) +
1877 (BCM_5710_FW_ENGINEERING_VERSION << 24);
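	/* e.g. an (assumed) FW version 7.2.16.0 packs into 0x00100207:
	 * major in byte 0, minor in byte 1, revision in byte 2 and
	 * engineering version in byte 3
	 */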
1878
1879 /* read loaded FW from chip */
1880 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1881
1882 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1883
1884 if (loaded_fw != my_fw) {
1885 if (is_err)
1886 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1887 loaded_fw, my_fw);
1888 return false;
1889 }
1890
1891 return true;
1892}
1893
Eric Dumazet1191cb82012-04-27 21:39:21 +00001894/**
1895 * bnx2x_bz_fp - zero content of the fastpath structure.
1896 *
1897 * @bp: driver handle
1898 * @index: fastpath index to be zeroed
1899 *
1900 * Makes sure the contents of the bp->fp[index].napi is kept
1901 * intact.
1902 */
1903static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1904{
1905 struct bnx2x_fastpath *fp = &bp->fp[index];
Barak Witkowski15192a82012-06-19 07:48:28 +00001906 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1907
Merav Sicron65565882012-06-19 07:48:26 +00001908 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001909 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00001910 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001911 /* bzero bnx2x_fastpath contents */
Barak Witkowski15192a82012-06-19 07:48:28 +00001912 if (bp->stats_init) {
1913 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001914 memset(fp, 0, sizeof(*fp));
Barak Witkowski15192a82012-06-19 07:48:28 +00001915 } else {
Eric Dumazet1191cb82012-04-27 21:39:21 +00001916 /* Keep Queue statistics */
1917 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1918 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1919
1920 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1921 GFP_KERNEL);
1922 if (tmp_eth_q_stats)
Barak Witkowski15192a82012-06-19 07:48:28 +00001923 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001924 sizeof(struct bnx2x_eth_q_stats));
1925
1926 tmp_eth_q_stats_old =
1927 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1928 GFP_KERNEL);
1929 if (tmp_eth_q_stats_old)
Barak Witkowski15192a82012-06-19 07:48:28 +00001930 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001931 sizeof(struct bnx2x_eth_q_stats_old));
1932
Barak Witkowski15192a82012-06-19 07:48:28 +00001933 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001934 memset(fp, 0, sizeof(*fp));
1935
1936 if (tmp_eth_q_stats) {
Barak Witkowski15192a82012-06-19 07:48:28 +00001937 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1938 sizeof(struct bnx2x_eth_q_stats));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001939 kfree(tmp_eth_q_stats);
1940 }
1941
1942 if (tmp_eth_q_stats_old) {
Barak Witkowski15192a82012-06-19 07:48:28 +00001943 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001944 sizeof(struct bnx2x_eth_q_stats_old));
1945 kfree(tmp_eth_q_stats_old);
1946 }
1947
1948 }
1949
1950 /* Restore the NAPI object as it has been already initialized */
1951 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00001952 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001953 fp->bp = bp;
1954 fp->index = index;
1955 if (IS_ETH_FP(fp))
1956 fp->max_cos = bp->max_cos;
1957 else
1958 /* Special queues support only one CoS */
1959 fp->max_cos = 1;
1960
Merav Sicron65565882012-06-19 07:48:26 +00001961 /* Init txdata pointers */
1962#ifdef BCM_CNIC
1963 if (IS_FCOE_FP(fp))
1964 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1965#endif
1966 if (IS_ETH_FP(fp))
1967 for_each_cos_in_tx_queue(fp, cos)
1968 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1969 BNX2X_NUM_ETH_QUEUES(bp) + index];
1970
Eric Dumazet1191cb82012-04-27 21:39:21 +00001971 /*
1972 * set the tpa flag for each queue. The tpa flag determines the queue
1973 * minimal size so it must be set prior to queue memory allocation
1974 */
1975 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1976 (bp->flags & GRO_ENABLE_FLAG &&
1977 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1978 if (bp->flags & TPA_ENABLE_FLAG)
1979 fp->mode = TPA_MODE_LRO;
1980 else if (bp->flags & GRO_ENABLE_FLAG)
1981 fp->mode = TPA_MODE_GRO;
1982
1983#ifdef BCM_CNIC
1984 /* We don't want TPA on an FCoE L2 ring */
1985 if (IS_FCOE_FP(fp))
1986 fp->disable_tpa = 1;
1987#endif
1988}
1989
1990
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001991/* must be called with rtnl_lock */
1992int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1993{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001994 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001995 u32 load_code;
1996 int i, rc;
1997
1998#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00001999 if (unlikely(bp->panic)) {
2000 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002001 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002002 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002003#endif
2004
2005 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2006
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002007 /* Set the initial link reported state to link down */
2008 bnx2x_acquire_phy_lock(bp);
2009 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2010 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2011 &bp->last_reported_link.link_report_flags);
2012 bnx2x_release_phy_lock(bp);
2013
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002014 /* must be called before memory allocation and HW init */
2015 bnx2x_ilt_set_info(bp);
2016
Ariel Elior6383c0b2011-07-14 08:31:57 +00002017 /*
 2018	 * Zero fastpath structures, preserving invariants which are allocated
 2019	 * only once: the napi struct, fp index, max_cos and the bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002020 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002021 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002022	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002023 for_each_queue(bp, i)
2024 bnx2x_bz_fp(bp, i);
Merav Sicron65565882012-06-19 07:48:26 +00002025 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2026 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002027
Ariel Elior6383c0b2011-07-14 08:31:57 +00002028
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002029 /* Set the receive queues buffer size */
2030 bnx2x_set_rx_buf_size(bp);
2031
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002032 if (bnx2x_alloc_mem(bp))
2033 return -ENOMEM;
2034
2035 /* As long as bnx2x_alloc_mem() may possibly update
2036 * bp->num_queues, bnx2x_set_real_num_queues() should always
2037 * come after it.
2038 */
2039 rc = bnx2x_set_real_num_queues(bp);
2040 if (rc) {
2041 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002042 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002043 }
2044
Ariel Elior6383c0b2011-07-14 08:31:57 +00002045 /* configure multi cos mappings in kernel.
 2046	 * this configuration may be overridden by a multi class queue discipline
2047 * or by a dcbx negotiation result.
2048 */
2049 bnx2x_setup_tc(bp->dev, bp->max_cos);
2050
Merav Sicron26614ba2012-08-27 03:26:19 +00002051 /* Add all NAPI objects */
2052 bnx2x_add_all_napi(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002053 bnx2x_napi_enable(bp);
2054
Ariel Elior889b9af2012-01-26 06:01:51 +00002055 /* set pf load just before approaching the MCP */
2056 bnx2x_set_pf_load(bp);
2057
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002058 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002059 * Returns the type of LOAD command:
 2060	 * if it is the first port to be initialized,
 2061	 * common blocks should be initialized, otherwise not
2062 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002063 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00002064 /* init fw_seq */
2065 bp->fw_seq =
2066 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2067 DRV_MSG_SEQ_NUMBER_MASK);
2068 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2069
2070 /* Get current FW pulse sequence */
2071 bp->fw_drv_pulse_wr_seq =
2072 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2073 DRV_PULSE_SEQ_MASK);
2074 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2075
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002076 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002077 if (!load_code) {
2078 BNX2X_ERR("MCP response failure, aborting\n");
2079 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002080 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002081 }
2082 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002083 BNX2X_ERR("Driver load refused\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002084 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002085 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002086 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00002087 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2088 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002089 /* abort nic load if version mismatch */
Yuval Mintz452427b2012-03-26 20:47:07 +00002090 if (!bnx2x_test_firmware_version(bp, true)) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002091 rc = -EBUSY;
2092 LOAD_ERROR_EXIT(bp, load_error2);
2093 }
2094 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002095
2096 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002097 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002098
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002099 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2100 path, load_count[path][0], load_count[path][1],
2101 load_count[path][2]);
2102 load_count[path][0]++;
2103 load_count[path][1 + port]++;
2104 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2105 path, load_count[path][0], load_count[path][1],
2106 load_count[path][2]);
2107 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002108 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002109 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002110 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2111 else
2112 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2113 }
2114
2115 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002116 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002117 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002118 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002119 /*
2120 * We need the barrier to ensure the ordering between the
2121 * writing to bp->port.pmf here and reading it from the
2122 * bnx2x_periodic_task().
2123 */
2124 smp_mb();
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002125 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002126 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002127
Merav Sicron51c1a582012-03-18 10:33:38 +00002128 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002129
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002130 /* Init Function state controlling object */
2131 bnx2x__init_func_obj(bp);
2132
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002133 /* Initialize HW */
2134 rc = bnx2x_init_hw(bp, load_code);
2135 if (rc) {
2136 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002137 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002138 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002139 }
2140
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002141 /* Connect to IRQs */
2142 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002143 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002144 BNX2X_ERR("IRQs setup failed\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002145 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002146 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002147 }
2148
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002149 /* Setup NIC internals and enable interrupts */
2150 bnx2x_nic_init(bp, load_code);
2151
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002152 /* Init per-function objects */
2153 bnx2x_init_bp_objs(bp);
2154
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002155 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2156 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002157 (bp->common.shmem2_base)) {
2158 if (SHMEM2_HAS(bp, dcc_support))
2159 SHMEM2_WR(bp, dcc_support,
2160 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2161 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
Barak Witkowskia3348722012-04-23 03:04:46 +00002162 if (SHMEM2_HAS(bp, afex_driver_support))
2163 SHMEM2_WR(bp, afex_driver_support,
2164 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002165 }
2166
Barak Witkowskia3348722012-04-23 03:04:46 +00002167 /* Set AFEX default VLAN tag to an invalid value */
2168 bp->afex_def_vlan_tag = -1;
2169
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002170 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2171 rc = bnx2x_func_start(bp);
2172 if (rc) {
2173 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00002174 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002175 LOAD_ERROR_EXIT(bp, load_error3);
2176 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002177
2178 /* Send LOAD_DONE command to MCP */
2179 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002180 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002181 if (!load_code) {
2182 BNX2X_ERR("MCP response failure, aborting\n");
2183 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002184 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002185 }
2186 }
2187
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002188 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002189 if (rc) {
2190 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002191 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002192 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002193
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002194#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002195 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002196 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002197#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002198
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002199 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002200 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Merav Sicron51c1a582012-03-18 10:33:38 +00002201 if (rc) {
2202 BNX2X_ERR("Queue setup failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002203 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002204 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002205 }
2206
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002207 rc = bnx2x_init_rss_pf(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002208 if (rc) {
2209 BNX2X_ERR("PF RSS init failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002210 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002211 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002212
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002213 /* Now when Clients are configured we are ready to work */
2214 bp->state = BNX2X_STATE_OPEN;
2215
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002216 /* Configure a ucast MAC */
2217 rc = bnx2x_set_eth_mac(bp, true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002218 if (rc) {
2219 BNX2X_ERR("Setting Ethernet MAC failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002220 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002221 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002222
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002223 if (bp->pending_max) {
2224 bnx2x_update_max_mf_config(bp, bp->pending_max);
2225 bp->pending_max = 0;
2226 }
2227
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002228 if (bp->port.pmf)
2229 bnx2x_initial_phy_init(bp, load_mode);
2230
2231 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002232
2233 /* Initialize Rx filter. */
2234 netif_addr_lock_bh(bp->dev);
2235 bnx2x_set_rx_mode(bp->dev);
2236 netif_addr_unlock_bh(bp->dev);
2237
2238 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002239 switch (load_mode) {
2240 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002241 /* Tx queue should be only reenabled */
2242 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002243 break;
2244
2245 case LOAD_OPEN:
2246 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002247 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002248 break;
2249
2250 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002251 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002252 bp->state = BNX2X_STATE_DIAG;
2253 break;
2254
2255 default:
2256 break;
2257 }
2258
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002259 if (bp->port.pmf)
Yuval Mintze695a2d2012-03-12 11:22:06 +00002260 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002261 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002262 bnx2x__link_status_update(bp);
2263
2264 /* start the timer */
2265 mod_timer(&bp->timer, jiffies + bp->current_interval);
2266
2267#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00002268 /* re-read iscsi info */
2269 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002270 bnx2x_setup_cnic_irq_info(bp);
Merav Sicron37ae41a2012-06-19 07:48:27 +00002271 bnx2x_setup_cnic_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002272 if (bp->state == BNX2X_STATE_OPEN)
2273 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2274#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002275
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002276 /* mark driver is loaded in shmem2 */
2277 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2278 u32 val;
2279 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2280 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2281 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2282 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2283 }
2284
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002285 /* Wait for all pending SP commands to complete */
2286 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2287 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2288 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2289 return -EBUSY;
2290 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002291
Barak Witkowski98768792012-06-19 07:48:31 +00002292 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2293 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2294 bnx2x_dcbx_init(bp, false);
2295
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002296 return 0;
2297
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002298#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002299load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002300#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002301 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002302 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002303#endif
2304load_error3:
2305 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002306
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002307 /* Clean queueable objects */
2308 bnx2x_squeeze_objects(bp);
2309
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002310 /* Free SKBs, SGEs, TPA pool and driver internals */
2311 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002312 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002313 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002314
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002315 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002316 bnx2x_free_irq(bp);
2317load_error2:
2318 if (!BP_NOMCP(bp)) {
2319 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2320 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2321 }
2322
2323 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002324load_error1:
2325 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002326 /* clear pf_load status, as it was already set */
2327 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002328load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002329 bnx2x_free_mem(bp);
2330
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002331 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002332#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002333}
2334
2335/* must be called with rtnl_lock */
2336int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2337{
2338 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002339 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002340
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002341 /* mark driver is unloaded in shmem2 */
2342 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2343 u32 val;
2344 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2345 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2346 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2347 }
2348
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002349 if ((bp->state == BNX2X_STATE_CLOSED) ||
2350 (bp->state == BNX2X_STATE_ERROR)) {
2351 /* We can get here if the driver has been unloaded
2352 * during parity error recovery and is either waiting for a
2353 * leader to complete or for other functions to unload and
2354 * then ifdown has been issued. In this case we want to
 2355		 * unload and let other functions complete a recovery
2356 * process.
2357 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002358 bp->recovery_state = BNX2X_RECOVERY_DONE;
2359 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002360 bnx2x_release_leader_lock(bp);
2361 smp_mb();
2362
Merav Sicron51c1a582012-03-18 10:33:38 +00002363 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2364 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002365 return -EINVAL;
2366 }
2367
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002368 /*
2369 * It's important to set the bp->state to the value different from
2370 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2371 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2372 */
2373 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2374 smp_mb();
2375
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002376 /* Stop Tx */
2377 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002378 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002379
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002380#ifdef BCM_CNIC
2381 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2382#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002383
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002384 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002385
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002386 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002387
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002388 /* Set ALWAYS_ALIVE bit in shmem */
2389 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2390
2391 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002392
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002393 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002394 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002395
2396 /* Cleanup the chip if needed */
2397 if (unload_mode != UNLOAD_RECOVERY)
2398 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002399 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002400 /* Send the UNLOAD_REQUEST to the MCP */
2401 bnx2x_send_unload_req(bp, unload_mode);
2402
2403 /*
2404 * Prevent transactions to host from the functions on the
2405 * engine that doesn't reset global blocks in case of global
 2406		 * attention once global blocks are reset and gates are opened
 2407		 * (the engine whose leader will perform the recovery
2408 * last).
2409 */
2410 if (!CHIP_IS_E1x(bp))
2411 bnx2x_pf_disable(bp);
2412
2413 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002414 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002415 /* Delete all NAPI objects */
2416 bnx2x_del_all_napi(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002417
2418 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002419 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002420
2421 /* Report UNLOAD_DONE to MCP */
2422 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002423 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002424
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002425 /*
 2426	 * At this stage no more interrupts will arrive, so we may safely clean
2427 * the queueable objects here in case they failed to get cleaned so far.
2428 */
2429 bnx2x_squeeze_objects(bp);
2430
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002431 /* There should be no more pending SP commands at this stage */
2432 bp->sp_state = 0;
2433
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002434 bp->port.pmf = 0;
2435
2436 /* Free SKBs, SGEs, TPA pool and driver internals */
2437 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002438 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002439 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002440
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002441 bnx2x_free_mem(bp);
2442
2443 bp->state = BNX2X_STATE_CLOSED;
2444
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002445 /* Check if there are pending parity attentions. If there are - set
2446 * RECOVERY_IN_PROGRESS.
2447 */
2448 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2449 bnx2x_set_reset_in_progress(bp);
2450
2451 /* Set RESET_IS_GLOBAL if needed */
2452 if (global)
2453 bnx2x_set_reset_global(bp);
2454 }
2455
2456
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002457 /* The last driver must disable a "close the gate" if there is no
2458 * parity attention or "process kill" pending.
2459 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002460 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002461 bnx2x_disable_close_the_gate(bp);
2462
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002463 return 0;
2464}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002465
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002466int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2467{
2468 u16 pmcsr;
2469
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002470 /* If there is no power capability, silently succeed */
2471 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002472 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002473 return 0;
2474 }
2475
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002476 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2477
2478 switch (state) {
2479 case PCI_D0:
2480 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2481 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2482 PCI_PM_CTRL_PME_STATUS));
2483
2484 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2485 /* delay required during transition out of D3hot */
2486 msleep(20);
2487 break;
2488
2489 case PCI_D3hot:
2490 /* If there are other clients above don't
2491 shut down the power */
2492 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2493 return 0;
2494 /* Don't shut down the power for emulation and FPGA */
2495 if (CHIP_REV_IS_SLOW(bp))
2496 return 0;
2497
2498 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2499 pmcsr |= 3;
2500
2501 if (bp->wol)
2502 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2503
2504 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2505 pmcsr);
2506
2507 /* No more memory access after this point until
2508 * device is brought back to D0.
2509 */
2510 break;
2511
2512 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00002513 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002514 return -EINVAL;
2515 }
2516 return 0;
2517}
2518
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002519/*
2520 * net_device service functions
2521 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002522int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002523{
2524 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002525 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002526 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2527 napi);
2528 struct bnx2x *bp = fp->bp;
2529
2530 while (1) {
2531#ifdef BNX2X_STOP_ON_ERROR
2532 if (unlikely(bp->panic)) {
2533 napi_complete(napi);
2534 return 0;
2535 }
2536#endif
2537
Ariel Elior6383c0b2011-07-14 08:31:57 +00002538 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00002539 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2540 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002541
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002542
2543 if (bnx2x_has_rx_work(fp)) {
2544 work_done += bnx2x_rx_int(fp, budget - work_done);
2545
2546 /* must not complete if we consumed full budget */
2547 if (work_done >= budget)
2548 break;
2549 }
2550
2551 /* Fall out from the NAPI loop if needed */
2552 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002553#ifdef BCM_CNIC
2554 /* No need to update SB for FCoE L2 ring as long as
2555 * it's connected to the default SB and the SB
2556 * has been updated when NAPI was scheduled.
2557 */
2558 if (IS_FCOE_FP(fp)) {
2559 napi_complete(napi);
2560 break;
2561 }
2562#endif
2563
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002564 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002565 /* bnx2x_has_rx_work() reads the status block,
2566 * thus we need to ensure that status block indices
2567 * have been actually read (bnx2x_update_fpsb_idx)
2568 * prior to this check (bnx2x_has_rx_work) so that
2569 * we won't write the "newer" value of the status block
2570 * to IGU (if there was a DMA right after
2571 * bnx2x_has_rx_work and if there is no rmb, the memory
2572 * reading (bnx2x_update_fpsb_idx) may be postponed
2573 * to right before bnx2x_ack_sb). In this case there
2574 * will never be another interrupt until there is
2575 * another update of the status block, while there
2576 * is still unhandled work.
2577 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002578 rmb();
2579
2580 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2581 napi_complete(napi);
2582 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00002583 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002584 "Update index to %d\n", fp->fp_hc_idx);
2585 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2586 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002587 IGU_INT_ENABLE, 1);
2588 break;
2589 }
2590 }
2591 }
2592
2593 return work_done;
2594}
2595
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002596/* we split the first BD into headers and data BDs
2597 * to ease the pain of our fellow microcode engineers;
2598 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002599 */
2600static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002601 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002602 struct sw_tx_bd *tx_buf,
2603 struct eth_tx_start_bd **tx_bd, u16 hlen,
2604 u16 bd_prod, int nbd)
2605{
2606 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2607 struct eth_tx_bd *d_tx_bd;
2608 dma_addr_t mapping;
2609 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2610
2611 /* first fix first BD */
2612 h_tx_bd->nbd = cpu_to_le16(nbd);
2613 h_tx_bd->nbytes = cpu_to_le16(hlen);
2614
Merav Sicron51c1a582012-03-18 10:33:38 +00002615 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2616 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002617
2618 /* now get a new data BD
2619 * (after the pbd) and fill it */
2620 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002621 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002622
2623 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2624 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2625
2626 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2627 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2628 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2629
2630 /* this marks the BD as one that has no individual mapping */
2631 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2632
2633 DP(NETIF_MSG_TX_QUEUED,
2634 "TSO split data size is %d (%x:%x)\n",
2635 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2636
2637 /* update tx_bd */
2638 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2639
2640 return bd_prod;
2641}
2642
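/* Re-fold the packet checksum when the HW checksum start differs from the
 * transport header by 'fix' bytes: subtract (fix > 0) or add (fix < 0) the
 * partial checksum of the bytes in between, then byte-swap the result.
 */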
2643static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2644{
2645 if (fix > 0)
2646 csum = (u16) ~csum_fold(csum_sub(csum,
2647 csum_partial(t_header - fix, fix, 0)));
2648
2649 else if (fix < 0)
2650 csum = (u16) ~csum_fold(csum_add(csum,
2651 csum_partial(t_header, -fix, 0)));
2652
2653 return swab16(csum);
2654}
2655
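/* Determine the XMIT_* flags (checksum type, IP version, GSO) for this skb */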
2656static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2657{
2658 u32 rc;
2659
2660 if (skb->ip_summed != CHECKSUM_PARTIAL)
2661 rc = XMIT_PLAIN;
2662
2663 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002664 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002665 rc = XMIT_CSUM_V6;
2666 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2667 rc |= XMIT_CSUM_TCP;
2668
2669 } else {
2670 rc = XMIT_CSUM_V4;
2671 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2672 rc |= XMIT_CSUM_TCP;
2673 }
2674 }
2675
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002676 if (skb_is_gso_v6(skb))
2677 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2678 else if (skb_is_gso(skb))
2679 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002680
2681 return rc;
2682}
2683
2684#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2685/* check if packet requires linearization (packet is too fragmented)
2686 no need to check fragmentation if page size > 8K (there will be no
2687   violation of FW restrictions) */
2688static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2689 u32 xmit_type)
2690{
2691 int to_copy = 0;
2692 int hlen = 0;
2693 int first_bd_sz = 0;
2694
2695 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2696 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2697
2698 if (xmit_type & XMIT_GSO) {
2699 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2700 /* Check if LSO packet needs to be copied:
2701 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2702 int wnd_size = MAX_FETCH_BD - 3;
2703 /* Number of windows to check */
2704 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2705 int wnd_idx = 0;
2706 int frag_idx = 0;
2707 u32 wnd_sum = 0;
2708
2709 /* Headers length */
2710 hlen = (int)(skb_transport_header(skb) - skb->data) +
2711 tcp_hdrlen(skb);
2712
2713			/* Amount of data (w/o headers) on linear part of SKB */
2714 first_bd_sz = skb_headlen(skb) - hlen;
2715
2716 wnd_sum = first_bd_sz;
2717
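			/* FW can fetch at most wnd_size data BDs per LSO
			 * segment, so every window of wnd_size consecutive
			 * frags must carry at least one full MSS or the skb
			 * has to be linearized.
			 */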
2718 /* Calculate the first sum - it's special */
2719 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2720 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002721 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002722
2723 /* If there was data on linear skb data - check it */
2724 if (first_bd_sz > 0) {
2725 if (unlikely(wnd_sum < lso_mss)) {
2726 to_copy = 1;
2727 goto exit_lbl;
2728 }
2729
2730 wnd_sum -= first_bd_sz;
2731 }
2732
2733 /* Others are easier: run through the frag list and
2734 check all windows */
2735 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2736 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002737 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002738
2739 if (unlikely(wnd_sum < lso_mss)) {
2740 to_copy = 1;
2741 break;
2742 }
2743 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002744 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002745 }
2746 } else {
2747			/* a non-LSO packet that is too fragmented should
2748			   always be linearized */
2749 to_copy = 1;
2750 }
2751 }
2752
2753exit_lbl:
2754 if (unlikely(to_copy))
2755 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00002756 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002757 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2758 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2759
2760 return to_copy;
2761}
2762#endif
2763
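/* Fill the E2 parsing data with the LSO MSS and IPv6 extension-header flag */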
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002764static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2765 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002766{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002767 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2768 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2769 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002770 if ((xmit_type & XMIT_GSO_V6) &&
2771 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002772 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002773}
2774
2775/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002776 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002777 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002778 * @skb: packet skb
2779 * @pbd: parse BD
2780 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002781 */
2782static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2783 struct eth_tx_parse_bd_e1x *pbd,
2784 u32 xmit_type)
2785{
2786 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2787 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2788 pbd->tcp_flags = pbd_tcp_flags(skb);
2789
2790 if (xmit_type & XMIT_GSO_V4) {
2791 pbd->ip_id = swab16(ip_hdr(skb)->id);
2792 pbd->tcp_pseudo_csum =
2793 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2794 ip_hdr(skb)->daddr,
2795 0, IPPROTO_TCP, 0));
2796
2797 } else
2798 pbd->tcp_pseudo_csum =
2799 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2800 &ipv6_hdr(skb)->daddr,
2801 0, IPPROTO_TCP, 0));
2802
2803 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2804}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002805
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002806/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002807 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002808 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002809 * @bp: driver handle
2810 * @skb: packet skb
2811 * @parsing_data: data to be updated
2812 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002813 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002814 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002815 */
2816static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002817 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002818{
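	/* the TCP header start offset is passed to the chip in 16-bit words */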
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002819 *parsing_data |=
2820 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2821 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2822 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002823
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002824 if (xmit_type & XMIT_CSUM_TCP) {
2825 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2826 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2827 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002828
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002829 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2830 } else
2831 /* We support checksum offload for TCP and UDP only.
2832 * No need to pass the UDP header length - it's a constant.
2833 */
2834 return skb_transport_header(skb) +
2835 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002836}
2837
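/* Set the L3/L4 checksum-offload flags in the start BD based on xmit_type */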
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002838static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2839 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2840{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002841 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2842
2843 if (xmit_type & XMIT_CSUM_V4)
2844 tx_start_bd->bd_flags.as_bitfield |=
2845 ETH_TX_BD_FLAGS_IP_CSUM;
2846 else
2847 tx_start_bd->bd_flags.as_bitfield |=
2848 ETH_TX_BD_FLAGS_IPV6;
2849
2850 if (!(xmit_type & XMIT_CSUM_TCP))
2851 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002852}
2853
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002854/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002855 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002856 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002857 * @bp: driver handle
2858 * @skb: packet skb
2859 * @pbd: parse BD to be updated
2860 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002861 */
2862static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2863 struct eth_tx_parse_bd_e1x *pbd,
2864 u32 xmit_type)
2865{
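	/* header lengths in the E1x parse BD are expressed in 16-bit words */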
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002866 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002867
2868 /* for now NS flag is not used in Linux */
2869 pbd->global_data =
2870 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2871 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2872
2873 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002874 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002875
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002876 hlen += pbd->ip_hlen_w;
2877
2878 /* We support checksum offload for TCP and UDP only */
2879 if (xmit_type & XMIT_CSUM_TCP)
2880 hlen += tcp_hdrlen(skb) / 2;
2881 else
2882 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002883
2884 pbd->total_hlen_w = cpu_to_le16(hlen);
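	/* hlen was accumulated in 16-bit words - convert back to bytes */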
2885 hlen = hlen*2;
2886
2887 if (xmit_type & XMIT_CSUM_TCP) {
2888 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2889
2890 } else {
2891 s8 fix = SKB_CS_OFF(skb); /* signed! */
2892
2893 DP(NETIF_MSG_TX_QUEUED,
2894 "hlen %d fix %d csum before fix %x\n",
2895 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2896
2897 /* HW bug: fixup the CSUM */
2898 pbd->tcp_pseudo_csum =
2899 bnx2x_csum_fix(skb_transport_header(skb),
2900 SKB_CS(skb), fix);
2901
2902 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2903 pbd->tcp_pseudo_csum);
2904 }
2905
2906 return hlen;
2907}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002908
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002909/* called with netif_tx_lock
2910 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2911 * netif_wake_queue()
2912 */
2913netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2914{
2915 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002916
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002917 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002918 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002919 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002920 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002921 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002922 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002923 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002924 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002925 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00002926 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002927 dma_addr_t mapping;
2928 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2929 int i;
2930 u8 hlen = 0;
2931 __le16 pkt_size = 0;
2932 struct ethhdr *eth;
2933 u8 mac_type = UNICAST_ADDRESS;
2934
2935#ifdef BNX2X_STOP_ON_ERROR
2936 if (unlikely(bp->panic))
2937 return NETDEV_TX_BUSY;
2938#endif
2939
Ariel Elior6383c0b2011-07-14 08:31:57 +00002940 txq_index = skb_get_queue_mapping(skb);
2941 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002942
Ariel Elior6383c0b2011-07-14 08:31:57 +00002943 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2944
Merav Sicron65565882012-06-19 07:48:26 +00002945 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002946
2947 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00002948 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002949 txq_index, fp_index, txdata_index); */
2950
Ariel Elior6383c0b2011-07-14 08:31:57 +00002951	/* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00002952 DP(NETIF_MSG_TX_QUEUED,
2953 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002954 txdata->cid, fp_index, txdata_index, txdata, fp); */
2955
2956 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00002957 skb_shinfo(skb)->nr_frags +
2958 BDS_PER_TX_PKT +
2959 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Barak Witkowski15192a82012-06-19 07:48:28 +00002960 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002961 netif_tx_stop_queue(txq);
2962 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2963 return NETDEV_TX_BUSY;
2964 }
2965
Merav Sicron51c1a582012-03-18 10:33:38 +00002966 DP(NETIF_MSG_TX_QUEUED,
2967 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002968 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002969 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2970
2971 eth = (struct ethhdr *)skb->data;
2972
2973 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2974 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2975 if (is_broadcast_ether_addr(eth->h_dest))
2976 mac_type = BROADCAST_ADDRESS;
2977 else
2978 mac_type = MULTICAST_ADDRESS;
2979 }
2980
2981#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2982 /* First, check if we need to linearize the skb (due to FW
2983 restrictions). No need to check fragmentation if page size > 8K
2984	   (there will be no violation of FW restrictions) */
2985 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2986 /* Statistics of linearization */
2987 bp->lin_cnt++;
2988 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002989 DP(NETIF_MSG_TX_QUEUED,
2990 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002991 dev_kfree_skb_any(skb);
2992 return NETDEV_TX_OK;
2993 }
2994 }
2995#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002996 /* Map skb linear data for DMA */
2997 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2998 skb_headlen(skb), DMA_TO_DEVICE);
2999 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003000 DP(NETIF_MSG_TX_QUEUED,
3001 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003002 dev_kfree_skb_any(skb);
3003 return NETDEV_TX_OK;
3004 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003005 /*
3006 Please read carefully. First we use one BD which we mark as start,
3007 then we have a parsing info BD (used for TSO or xsum),
3008 and only then we have the rest of the TSO BDs.
3009 (don't forget to mark the last one as last,
3010 and to unmap only AFTER you write to the BD ...)
3011	   And above all, all PBD sizes are in words - NOT DWORDS!
3012 */
3013
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003014 /* get current pkt produced now - advance it just before sending packet
3015 * since mapping of pages may fail and cause packet to be dropped
3016 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003017 pkt_prod = txdata->tx_pkt_prod;
3018 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003019
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003020 /* get a tx_buf and first BD
3021 * tx_start_bd may be changed during SPLIT,
3022 * but first_bd will always stay first
3023 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003024 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3025 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003026 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003027
3028 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003029 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
3030 mac_type);
3031
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003032 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003033 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003034
3035 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003036 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003037 tx_buf->skb = skb;
3038 tx_buf->flags = 0;
3039
3040 DP(NETIF_MSG_TX_QUEUED,
3041 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003042 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003043
Jesse Grosseab6d182010-10-20 13:56:03 +00003044 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003045 tx_start_bd->vlan_or_ethertype =
3046 cpu_to_le16(vlan_tx_tag_get(skb));
3047 tx_start_bd->bd_flags.as_bitfield |=
3048 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003049 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003050 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003051
3052 /* turn on parsing and get a BD */
3053 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003054
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003055 if (xmit_type & XMIT_CSUM)
3056 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003057
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003058 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003059 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003060 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3061 /* Set PBD in checksum offload case */
3062 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003063 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3064 &pbd_e2_parsing_data,
3065 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003066 if (IS_MF_SI(bp)) {
3067 /*
3068 * fill in the MAC addresses in the PBD - for local
3069 * switching
3070 */
3071 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3072 &pbd_e2->src_mac_addr_mid,
3073 &pbd_e2->src_mac_addr_lo,
3074 eth->h_source);
3075 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3076 &pbd_e2->dst_mac_addr_mid,
3077 &pbd_e2->dst_mac_addr_lo,
3078 eth->h_dest);
3079 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003080 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003081 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003082 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3083 /* Set PBD in checksum offload case */
3084 if (xmit_type & XMIT_CSUM)
3085 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003086
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003087 }
3088
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003089 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003090 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3091 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003092 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003093 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3094 pkt_size = tx_start_bd->nbytes;
3095
Merav Sicron51c1a582012-03-18 10:33:38 +00003096 DP(NETIF_MSG_TX_QUEUED,
3097 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003098 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3099 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003100 tx_start_bd->bd_flags.as_bitfield,
3101 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003102
3103 if (xmit_type & XMIT_GSO) {
3104
3105 DP(NETIF_MSG_TX_QUEUED,
3106 "TSO packet len %d hlen %d total len %d tso size %d\n",
3107 skb->len, hlen, skb_headlen(skb),
3108 skb_shinfo(skb)->gso_size);
3109
3110 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3111
3112 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003113 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3114 &tx_start_bd, hlen,
3115 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003116 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003117 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3118 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003119 else
3120 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003121 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003122
3123 /* Set the PBD's parsing_data field if not zero
3124 * (for the chips newer than 57711).
3125 */
3126 if (pbd_e2_parsing_data)
3127 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3128
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003129 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3130
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003131 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003132 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3134
Eric Dumazet9e903e02011-10-18 21:00:24 +00003135 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3136 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003137 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003138 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003139
Merav Sicron51c1a582012-03-18 10:33:38 +00003140 DP(NETIF_MSG_TX_QUEUED,
3141 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003142
3143			/* we need to unmap all buffers already mapped
3144			 * for this SKB;
3145			 * first_bd->nbd needs to be properly updated
3146			 * before the call to bnx2x_free_tx_pkt
3147 */
3148 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003149 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003150 TX_BD(txdata->tx_pkt_prod),
3151 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003152 return NETDEV_TX_OK;
3153 }
3154
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003155 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003156 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003157 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003158 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003159
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003160 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3161 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003162 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3163 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003164 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003165
3166 DP(NETIF_MSG_TX_QUEUED,
3167 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3168 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3169 le16_to_cpu(tx_data_bd->nbytes));
3170 }
3171
3172 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3173
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003174 /* update with actual num BDs */
3175 first_bd->nbd = cpu_to_le16(nbd);
3176
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003177 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3178
3179	/* now send a tx doorbell, counting the next-page BD as well
3180	 * if the packet crosses or ends on it
3181 */
3182 if (TX_BD_POFF(bd_prod) < nbd)
3183 nbd++;
3184
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003185 /* total_pkt_bytes should be set on the first data BD if
3186 * it's not an LSO packet and there is more than one
3187 * data BD. In this case pkt_size is limited by an MTU value.
3188 * However we prefer to set it for an LSO packet (while we don't
3189	 * have to) in order to save some CPU cycles in the non-LSO
3190	 * case, which we care about much more.
3191 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003192 if (total_pkt_bd != NULL)
3193 total_pkt_bd->total_pkt_bytes = pkt_size;
3194
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003195 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003196 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003197 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003198 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3199 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3200 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3201 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003202 if (pbd_e2)
3203 DP(NETIF_MSG_TX_QUEUED,
3204 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3205 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3206 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3207 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3208 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003209 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3210
Tom Herbert2df1a702011-11-28 16:33:37 +00003211 netdev_tx_sent_queue(txq, skb->len);
3212
Willem de Bruijn8373c572012-04-27 09:04:06 +00003213 skb_tx_timestamp(skb);
3214
Ariel Elior6383c0b2011-07-14 08:31:57 +00003215 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003216 /*
3217 * Make sure that the BD data is updated before updating the producer
3218 * since FW might read the BD right after the producer is updated.
3219 * This is only applicable for weak-ordered memory model archs such
3220 * as IA-64. The following barrier is also mandatory since FW will
3221	 * assume packets must have BDs.
3222 */
3223 wmb();
3224
Ariel Elior6383c0b2011-07-14 08:31:57 +00003225 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003226 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003227
Ariel Elior6383c0b2011-07-14 08:31:57 +00003228 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003229
3230 mmiowb();
3231
Ariel Elior6383c0b2011-07-14 08:31:57 +00003232 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003233
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003234 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003235 netif_tx_stop_queue(txq);
3236
3237		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
3238		 * ordering of set_bit() in netif_tx_stop_queue() and the read of
3239		 * txdata->tx_bd_cons */
3240 smp_mb();
3241
Barak Witkowski15192a82012-06-19 07:48:28 +00003242 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003243 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003244 netif_tx_wake_queue(txq);
3245 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003246 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003247
3248 return NETDEV_TX_OK;
3249}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003250
Ariel Elior6383c0b2011-07-14 08:31:57 +00003251/**
3252 * bnx2x_setup_tc - routine to configure net_device for multi tc
3253 *
3254 * @netdev: net device to configure
3255 * @tc: number of traffic classes to enable
3256 *
3257 * callback connected to the ndo_setup_tc function pointer
3258 */
3259int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3260{
3261 int cos, prio, count, offset;
3262 struct bnx2x *bp = netdev_priv(dev);
3263
3264 /* setup tc must be called under rtnl lock */
3265 ASSERT_RTNL();
3266
3267 /* no traffic classes requested. aborting */
3268 if (!num_tc) {
3269 netdev_reset_tc(dev);
3270 return 0;
3271 }
3272
3273 /* requested to support too many traffic classes */
3274 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003275 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3276 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003277 return -EINVAL;
3278 }
3279
3280 /* declare amount of supported traffic classes */
3281 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003282 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003283 return -EINVAL;
3284 }
3285
3286 /* configure priority to traffic class mapping */
3287 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3288 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003289 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3290 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003291 prio, bp->prio_to_cos[prio]);
3292 }
3293
3294
3295	/* Use this configuration to differentiate tc0 from other COSes.
3296	   This can be used for ETS or PFC, and saves the effort of setting
3297	   up a multi-class queue disc or negotiating DCBX with a switch
3298 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003299 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003300 for (prio = 1; prio < 16; prio++) {
3301 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003302 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003303 } */
3304
3305 /* configure traffic class to transmission queue mapping */
3306 for (cos = 0; cos < bp->max_cos; cos++) {
3307 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003308 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003309 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003310 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3311 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003312 cos, offset, count);
3313 }
3314
3315 return 0;
3316}
3317
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003318/* called with rtnl_lock */
3319int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3320{
3321 struct sockaddr *addr = p;
3322 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003323 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003324
Merav Sicron51c1a582012-03-18 10:33:38 +00003325 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3326 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003327 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003328 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003329
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003330#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003331 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3332 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003333 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003334 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003335 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003336#endif
3337
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003338 if (netif_running(dev)) {
3339 rc = bnx2x_set_eth_mac(bp, false);
3340 if (rc)
3341 return rc;
3342 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003343
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003344 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003345 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3346
3347 if (netif_running(dev))
3348 rc = bnx2x_set_eth_mac(bp, true);
3349
3350 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003351}
3352
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003353static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3354{
3355 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3356 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003357 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003358
3359 /* Common */
3360#ifdef BCM_CNIC
3361 if (IS_FCOE_IDX(fp_index)) {
3362 memset(sb, 0, sizeof(union host_hc_status_block));
3363 fp->status_blk_mapping = 0;
3364
3365 } else {
3366#endif
3367 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003368 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003369 BNX2X_PCI_FREE(sb->e2_sb,
3370 bnx2x_fp(bp, fp_index,
3371 status_blk_mapping),
3372 sizeof(struct host_hc_status_block_e2));
3373 else
3374 BNX2X_PCI_FREE(sb->e1x_sb,
3375 bnx2x_fp(bp, fp_index,
3376 status_blk_mapping),
3377 sizeof(struct host_hc_status_block_e1x));
3378#ifdef BCM_CNIC
3379 }
3380#endif
3381 /* Rx */
3382 if (!skip_rx_queue(bp, fp_index)) {
3383 bnx2x_free_rx_bds(fp);
3384
3385 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3386 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3387 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3388 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3389 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3390
3391 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3392 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3393 sizeof(struct eth_fast_path_rx_cqe) *
3394 NUM_RCQ_BD);
3395
3396 /* SGE ring */
3397 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3398 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3399 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3400 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3401 }
3402
3403 /* Tx */
3404 if (!skip_tx_queue(bp, fp_index)) {
3405 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003406 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003407 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003408
Merav Sicron51c1a582012-03-18 10:33:38 +00003409 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003410 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003411 fp_index, cos, txdata->cid);
3412
3413 BNX2X_FREE(txdata->tx_buf_ring);
3414 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3415 txdata->tx_desc_mapping,
3416 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3417 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003418 }
3419 /* end of fastpath */
3420}
3421
3422void bnx2x_free_fp_mem(struct bnx2x *bp)
3423{
3424 int i;
3425 for_each_queue(bp, i)
3426 bnx2x_free_fp_mem_at(bp, i);
3427}
3428
Eric Dumazet1191cb82012-04-27 21:39:21 +00003429static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003430{
3431 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003432 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003433 bnx2x_fp(bp, index, sb_index_values) =
3434 (__le16 *)status_blk.e2_sb->sb.index_values;
3435 bnx2x_fp(bp, index, sb_running_index) =
3436 (__le16 *)status_blk.e2_sb->sb.running_index;
3437 } else {
3438 bnx2x_fp(bp, index, sb_index_values) =
3439 (__le16 *)status_blk.e1x_sb->sb.index_values;
3440 bnx2x_fp(bp, index, sb_running_index) =
3441 (__le16 *)status_blk.e1x_sb->sb.running_index;
3442 }
3443}
3444
Eric Dumazet1191cb82012-04-27 21:39:21 +00003445/* Returns the number of actually allocated BDs */
3446static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3447 int rx_ring_size)
3448{
3449 struct bnx2x *bp = fp->bp;
3450 u16 ring_prod, cqe_ring_prod;
3451 int i, failure_cnt = 0;
3452
3453 fp->rx_comp_cons = 0;
3454 cqe_ring_prod = ring_prod = 0;
3455
3456	/* This routine is called only during fp init so
3457 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3458 */
3459 for (i = 0; i < rx_ring_size; i++) {
3460 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3461 failure_cnt++;
3462 continue;
3463 }
3464 ring_prod = NEXT_RX_IDX(ring_prod);
3465 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3466 WARN_ON(ring_prod <= (i - failure_cnt));
3467 }
3468
3469 if (failure_cnt)
3470 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3471 i - failure_cnt, fp->index);
3472
3473 fp->rx_bd_prod = ring_prod;
3474 /* Limit the CQE producer by the CQE ring size */
3475 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3476 cqe_ring_prod);
3477 fp->rx_pkt = fp->rx_calls = 0;
3478
Barak Witkowski15192a82012-06-19 07:48:28 +00003479 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00003480
3481 return i - failure_cnt;
3482}
3483
3484static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3485{
3486 int i;
3487
3488 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3489 struct eth_rx_cqe_next_page *nextpg;
3490
3491 nextpg = (struct eth_rx_cqe_next_page *)
3492 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3493 nextpg->addr_hi =
3494 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3495 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3496 nextpg->addr_lo =
3497 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3498 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3499 }
3500}
3501
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003502static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3503{
3504 union host_hc_status_block *sb;
3505 struct bnx2x_fastpath *fp = &bp->fp[index];
3506 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003507 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003508 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003509
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003510#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003511 if (!bp->rx_ring_size &&
3512 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003513 rx_ring_size = MIN_RX_SIZE_NONTPA;
3514 bp->rx_ring_size = rx_ring_size;
3515 } else
3516#endif
David S. Miller8decf862011-09-22 03:23:13 -04003517 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003518 u32 cfg = SHMEM_RD(bp,
3519 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003520
David S. Miller8decf862011-09-22 03:23:13 -04003521 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3522
Mintz Yuvald760fc32012-02-15 02:10:28 +00003523		/* Decrease ring size for 1G functions */
3524 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3525 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3526 rx_ring_size /= 10;
3527
David S. Miller8decf862011-09-22 03:23:13 -04003528 /* allocate at least number of buffers required by FW */
3529 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3530 MIN_RX_SIZE_TPA, rx_ring_size);
3531
3532 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003533 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003534 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003535
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003536 /* Common */
3537 sb = &bnx2x_fp(bp, index, status_blk);
3538#ifdef BCM_CNIC
3539 if (!IS_FCOE_IDX(index)) {
3540#endif
3541 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003542 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003543 BNX2X_PCI_ALLOC(sb->e2_sb,
3544 &bnx2x_fp(bp, index, status_blk_mapping),
3545 sizeof(struct host_hc_status_block_e2));
3546 else
3547 BNX2X_PCI_ALLOC(sb->e1x_sb,
3548 &bnx2x_fp(bp, index, status_blk_mapping),
3549 sizeof(struct host_hc_status_block_e1x));
3550#ifdef BCM_CNIC
3551 }
3552#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003553
3554 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3555 * set shortcuts for it.
3556 */
3557 if (!IS_FCOE_IDX(index))
3558 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003559
3560 /* Tx */
3561 if (!skip_tx_queue(bp, index)) {
3562 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003563 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003564 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003565
Merav Sicron51c1a582012-03-18 10:33:38 +00003566 DP(NETIF_MSG_IFUP,
3567 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003568 index, cos);
3569
3570 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003571 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003572 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3573 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003574 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003575 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003576 }
3577
3578 /* Rx */
3579 if (!skip_rx_queue(bp, index)) {
3580 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3581 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3582 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3583 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3584 &bnx2x_fp(bp, index, rx_desc_mapping),
3585 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3586
3587 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3588 &bnx2x_fp(bp, index, rx_comp_mapping),
3589 sizeof(struct eth_fast_path_rx_cqe) *
3590 NUM_RCQ_BD);
3591
3592 /* SGE ring */
3593 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3594 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3595 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3596 &bnx2x_fp(bp, index, rx_sge_mapping),
3597 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3598 /* RX BD ring */
3599 bnx2x_set_next_page_rx_bd(fp);
3600
3601 /* CQ ring */
3602 bnx2x_set_next_page_rx_cq(fp);
3603
3604 /* BDs */
3605 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3606 if (ring_size < rx_ring_size)
3607 goto alloc_mem_err;
3608 }
3609
3610 return 0;
3611
3612/* handles low memory cases */
3613alloc_mem_err:
3614 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3615 index, ring_size);
3616	/* FW will drop all packets if the queue is not big enough.
3617	 * In these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003618 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003619 */
3620 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003621 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003622 /* release memory allocated for this queue */
3623 bnx2x_free_fp_mem_at(bp, index);
3624 return -ENOMEM;
3625 }
3626 return 0;
3627}
3628
3629int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3630{
3631 int i;
3632
3633 /**
3634 * 1. Allocate FP for leading - fatal if error
3635 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003636 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3637 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003638 */
3639
3640 /* leading */
3641 if (bnx2x_alloc_fp_mem_at(bp, 0))
3642 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003643
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003644#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003645 if (!NO_FCOE(bp))
3646 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00003647 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003648 /* we will fail load process instead of mark
3649 * NO_FCOE_FLAG
3650 */
3651 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003652#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003653
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003654 /* RSS */
3655 for_each_nondefault_eth_queue(bp, i)
3656 if (bnx2x_alloc_fp_mem_at(bp, i))
3657 break;
3658
3659 /* handle memory failures */
3660 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3661 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3662
3663 WARN_ON(delta < 0);
3664#ifdef BCM_CNIC
3665 /**
3666 * move non eth FPs next to last eth FP
3667 * must be done in that order
3668 * FCOE_IDX < FWD_IDX < OOO_IDX
3669 */
3670
Ariel Elior6383c0b2011-07-14 08:31:57 +00003671		/* move FCoE fp even if NO_FCOE_FLAG is on */
Merav Sicron65565882012-06-19 07:48:26 +00003672 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003673#endif
3674 bp->num_queues -= delta;
3675 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3676 bp->num_queues + delta, bp->num_queues);
3677 }
3678
3679 return 0;
3680}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003681
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003682void bnx2x_free_mem_bp(struct bnx2x *bp)
3683{
Barak Witkowski15192a82012-06-19 07:48:28 +00003684 kfree(bp->fp->tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003685 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00003686 kfree(bp->sp_objs);
3687 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00003688 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003689 kfree(bp->msix_table);
3690 kfree(bp->ilt);
3691}
3692
3693int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3694{
3695 struct bnx2x_fastpath *fp;
3696 struct msix_entry *tbl;
3697 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003698 int msix_table_size = 0;
Barak Witkowski15192a82012-06-19 07:48:28 +00003699 int fp_array_size;
3700 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003701
Ariel Elior6383c0b2011-07-14 08:31:57 +00003702 /*
3703	 * The biggest MSI-X table we might need is the maximum number of fast
3704	 * path IGU SBs plus the default SB (for PF).
3705 */
3706 msix_table_size = bp->igu_sb_cnt + 1;
3707
3708 /* fp array: RSS plus CNIC related L2 queues */
Barak Witkowski15192a82012-06-19 07:48:28 +00003709 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3710 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3711
3712 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003713 if (!fp)
3714 goto alloc_err;
Barak Witkowski15192a82012-06-19 07:48:28 +00003715 for (i = 0; i < fp_array_size; i++) {
3716 fp[i].tpa_info =
3717 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3718 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3719 if (!(fp[i].tpa_info))
3720 goto alloc_err;
3721 }
3722
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003723 bp->fp = fp;
3724
Barak Witkowski15192a82012-06-19 07:48:28 +00003725 /* allocate sp objs */
3726 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3727 GFP_KERNEL);
3728 if (!bp->sp_objs)
3729 goto alloc_err;
3730
3731 /* allocate fp_stats */
3732 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3733 GFP_KERNEL);
3734 if (!bp->fp_stats)
3735 goto alloc_err;
3736
Merav Sicron65565882012-06-19 07:48:26 +00003737 /* Allocate memory for the transmission queues array */
3738 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3739#ifdef BCM_CNIC
3740 bp->bnx2x_txq_size++;
3741#endif
3742 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3743 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3744 if (!bp->bnx2x_txq)
3745 goto alloc_err;
3746
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003747 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003748 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003749 if (!tbl)
3750 goto alloc_err;
3751 bp->msix_table = tbl;
3752
3753 /* ilt */
3754 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3755 if (!ilt)
3756 goto alloc_err;
3757 bp->ilt = ilt;
3758
3759 return 0;
3760alloc_err:
3761 bnx2x_free_mem_bp(bp);
3762 return -ENOMEM;
3763
3764}
3765
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003766int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003767{
3768 struct bnx2x *bp = netdev_priv(dev);
3769
3770 if (unlikely(!netif_running(dev)))
3771 return 0;
3772
3773 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3774 return bnx2x_nic_load(bp, LOAD_NORMAL);
3775}
3776
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003777int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3778{
3779 u32 sel_phy_idx = 0;
3780 if (bp->link_params.num_phys <= 1)
3781 return INT_PHY;
3782
3783 if (bp->link_vars.link_up) {
3784 sel_phy_idx = EXT_PHY1;
3785 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3786 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3787 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3788 sel_phy_idx = EXT_PHY2;
3789 } else {
3790
3791 switch (bnx2x_phy_selection(&bp->link_params)) {
3792 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3793 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3794 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3795 sel_phy_idx = EXT_PHY1;
3796 break;
3797 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3798 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3799 sel_phy_idx = EXT_PHY2;
3800 break;
3801 }
3802 }
3803
3804 return sel_phy_idx;
3805
3806}
3807int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3808{
3809 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3810 /*
3811	 * The selected active PHY is always the one after swapping (in case
3812	 * PHY swapping is enabled). So when swapping is enabled, we need to
3813	 * reverse the configuration
3814 */
3815
3816 if (bp->link_params.multi_phy_config &
3817 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3818 if (sel_phy_idx == EXT_PHY1)
3819 sel_phy_idx = EXT_PHY2;
3820 else if (sel_phy_idx == EXT_PHY2)
3821 sel_phy_idx = EXT_PHY1;
3822 }
3823 return LINK_CONFIG_IDX(sel_phy_idx);
3824}
3825
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003826#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3827int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3828{
3829 struct bnx2x *bp = netdev_priv(dev);
3830 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3831
3832 switch (type) {
3833 case NETDEV_FCOE_WWNN:
3834 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3835 cp->fcoe_wwn_node_name_lo);
3836 break;
3837 case NETDEV_FCOE_WWPN:
3838 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3839 cp->fcoe_wwn_port_name_lo);
3840 break;
3841 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003842 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003843 return -EINVAL;
3844 }
3845
3846 return 0;
3847}
3848#endif
3849
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003850/* called with rtnl_lock */
3851int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3852{
3853 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003854
3855 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003856 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003857 return -EAGAIN;
3858 }
3859
3860 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00003861 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3862 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003863 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003864 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003865
3866 /* This does not race with packet allocation
3867 * because the actual alloc size is
3868 * only updated as part of load
3869 */
3870 dev->mtu = new_mtu;
3871
Michał Mirosław66371c42011-04-12 09:38:23 +00003872 return bnx2x_reload_if_running(dev);
3873}
3874
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003875netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003876 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003877{
3878 struct bnx2x *bp = netdev_priv(dev);
3879
3880 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003881 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003882 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003883 features &= ~NETIF_F_GRO;
3884 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003885
3886 return features;
3887}
3888
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

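/**
 * bnx2x_tx_timeout - handle a Tx queue timeout.
 *
 * @dev: net device
 *
 * Defers the recovery action to the sp_rtnl task so that the netif can
 * be shut down gracefully before the chip is reset.
 */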
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

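/**
 * bnx2x_suspend - PCI suspend callback.
 *
 * @pdev: PCI device
 * @state: power state selected by the PM core
 *
 * Saves PCI state and, if the interface is running, detaches it, unloads
 * the NIC and moves the device into the chosen low-power state.
 */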
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

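/**
 * bnx2x_resume - PCI resume callback.
 *
 * @pdev: PCI device
 *
 * Restores PCI state, returns the device to D0 and, if the interface was
 * running, attaches it and reloads the NIC. Returns -EAGAIN while parity
 * error recovery is still being handled.
 */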
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

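/**
 * bnx2x_set_ctx_validation - write CDU validation data for a connection.
 *
 * @bp: driver handle
 * @cxt: Ethernet context of the connection
 * @cid: connection id
 *
 * Fills the ustorm and xstorm aggregation context fields with the CDU
 * reserved values derived from the HW connection id.
 */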
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

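/* Write the host-coalescing timeout (in ticks) of one status block index
 * into the chip's cstorm internal memory.
 */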
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

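/* Set or clear the HC_ENABLED flag of one status block index with a
 * read-modify-write of the 16-bit flags word, leaving the other bits
 * untouched.
 */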
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

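/**
 * bnx2x_update_coalesce_sb_index - program coalescing for one SB index.
 *
 * @bp: driver handle
 * @fw_sb_id: firmware status block id
 * @sb_index: index within the status block
 * @disable: explicitly disable coalescing for this index
 * @usec: coalescing interval in microseconds
 *
 * Converts @usec into timer ticks (usec / BNX2X_BTR; e.g. assuming a
 * 4 usec BNX2X_BTR, 48 usec maps to 12 ticks) and writes both the
 * timeout and the enable/disable flag. A zero @usec also disables
 * coalescing for the index.
 */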
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
4069}