/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"



/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[old_txdata_index],
	       &bp->bnx2x_txq[new_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);


	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

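/* Process completed Tx packets on a single txdata ring: walk the SW consumer
 * up to the HW completion index, free the finished skbs, report the totals to
 * BQL and re-wake the Tx queue if it was stopped and enough BDs were freed.
 */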
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

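/* Mark the SGEs consumed by a completed aggregation as used and advance the
 * SGE producer over any fully consumed 64-bit mask elements.
 */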
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}

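/* Begin a TPA aggregation: map a fresh buffer for the producer BD, park the
 * consumed buffer in the per-queue bin and latch the parsing flags, VLAN tag
 * and length taken from the START CQE.
 */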
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);


	/* Check if there was a TCP timestamp, if there is it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

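/* Allocate and DMA-map a page for an SGE ring slot and publish its address
 * in the SGE BD.
 */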
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

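/* Attach the SGE pages of a completed aggregation to the skb as page
 * fragments, setting gso_size/gso_type so the stack can resegment the
 * aggregated packet later (e.g. when forwarding).
 */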
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
			     PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}


#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

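/* Close a TPA aggregation: build an skb around the aggregated buffer, append
 * the SGE fragments and pass it to GRO, refilling the bin with newly
 * allocated data; on failure the packet is dropped and the buffer stays in
 * the bin.
 */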
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, 0);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

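/* Allocate and DMA-map a new data buffer for an Rx BD and publish its
 * address in the ring.
 */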
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no IP/L4 csum validation was done */

	if (cqe->fast_path_cqe.status_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
		return;

	/* If both IP/L4 validation were done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

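/* Rx fast-path polling routine: process up to @budget CQEs, dispatching
 * slow-path events, TPA start/stop completions and regular packets, then
 * update the Rx producers.
 */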
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, 0);
				if (unlikely(!skb)) {
					kfree(data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

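/* MSI-X fast-path interrupt handler: ack the status block and schedule NAPI
 * for this queue.
 */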
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

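/* Allocate the per-queue TPA pools and SGE rings (falling back to
 * TPA-disabled on allocation failure) and activate the Rx BD rings.
 */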
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

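/* Free every skb still held in the Tx rings and reset the corresponding
 * BQL queue state.
 */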
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev,
						    txdata->txq_index));
		}
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else
		free_irq(bp->dev->irq, bp->dev);
}

Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001363int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001364{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001365 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001366
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001367 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001368 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001369 bp->msix_table[0].entry);
1370 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001371
1372#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001373 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001374 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001375 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1376 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001377#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001378 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001379 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001380 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001381 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1382 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001383 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001384 }
1385
Ariel Elior6383c0b2011-07-14 08:31:57 +00001386 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001387
1388 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001389
1390 /*
1391 * reconfigure number of tx/rx queues according to available
1392 * MSI-X vectors
1393 */
1394 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001395		/* how many fewer vectors do we have? */
1396 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001397
Merav Sicron51c1a582012-03-18 10:33:38 +00001398 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001399
1400 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1401
1402 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001403 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1404 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001405 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001406 /*
1407		 * decrease the number of queues by the number of unallocated entries
1408 */
1409 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001410
Merav Sicron51c1a582012-03-18 10:33:38 +00001411 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001412 bp->num_queues);
1413 } else if (rc > 0) {
1414 /* Get by with single vector */
1415 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1416 if (rc) {
1417 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1418 rc);
1419 goto no_msix;
1420 }
1421
1422 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1423 bp->flags |= USING_SINGLE_MSIX_FLAG;
1424
1425 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001426 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001427 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001428 }
1429
1430 bp->flags |= USING_MSIX_FLAG;
1431
1432 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001433
1434no_msix:
1435	/* fall back to INTx if there is not enough memory */
1436 if (rc == -ENOMEM)
1437 bp->flags |= DISABLE_MSI_FLAG;
1438
1439 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001440}
1441
1442static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1443{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001444 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001445
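	/* vector 0 is the slowpath interrupt; when CNIC is built in, its
	 * vector is skipped here (it is not requested by this function), and
	 * the remaining vectors are requested one per ETH fastpath queue
	 */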
Dmitry Kravkovca924292011-06-14 01:33:08 +00001446 rc = request_irq(bp->msix_table[offset++].vector,
1447 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001448 bp->dev->name, bp->dev);
1449 if (rc) {
1450 BNX2X_ERR("request sp irq failed\n");
1451 return -EBUSY;
1452 }
1453
1454#ifdef BCM_CNIC
1455 offset++;
1456#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001457 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001458 struct bnx2x_fastpath *fp = &bp->fp[i];
1459 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1460 bp->dev->name, i);
1461
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001462 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001463 bnx2x_msix_fp_int, 0, fp->name, fp);
1464 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001465 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1466 bp->msix_table[offset].vector, rc);
1467 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001468 return -EBUSY;
1469 }
1470
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001471 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001472 }
1473
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001474 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001475 offset = 1 + CNIC_PRESENT;
Merav Sicron51c1a582012-03-18 10:33:38 +00001476 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001477 bp->msix_table[0].vector,
1478 0, bp->msix_table[offset].vector,
1479 i - 1, bp->msix_table[offset + i - 1].vector);
1480
1481 return 0;
1482}
1483
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001484int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001485{
1486 int rc;
1487
1488 rc = pci_enable_msi(bp->pdev);
1489 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001490 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001491 return -1;
1492 }
1493 bp->flags |= USING_MSI_FLAG;
1494
1495 return 0;
1496}
1497
1498static int bnx2x_req_irq(struct bnx2x *bp)
1499{
1500 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001501 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001502
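	/* MSI and MSI-X vectors are exclusive to this device, so IRQF_SHARED
	 * is only needed for legacy INTx
	 */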
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001503 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001504 flags = 0;
1505 else
1506 flags = IRQF_SHARED;
1507
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001508 if (bp->flags & USING_MSIX_FLAG)
1509 irq = bp->msix_table[0].vector;
1510 else
1511 irq = bp->pdev->irq;
1512
1513 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001514}
1515
Eric Dumazet1191cb82012-04-27 21:39:21 +00001516static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001517{
1518 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001519 if (bp->flags & USING_MSIX_FLAG &&
1520 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001521 rc = bnx2x_req_msix_irqs(bp);
1522 if (rc)
1523 return rc;
1524 } else {
1525 bnx2x_ack_int(bp);
1526 rc = bnx2x_req_irq(bp);
1527 if (rc) {
1528 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1529 return rc;
1530 }
1531 if (bp->flags & USING_MSI_FLAG) {
1532 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001533 netdev_info(bp->dev, "using MSI IRQ %d\n",
1534 bp->dev->irq);
1535 }
1536 if (bp->flags & USING_MSIX_FLAG) {
1537 bp->dev->irq = bp->msix_table[0].vector;
1538 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1539 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001540 }
1541 }
1542
1543 return 0;
1544}
1545
Eric Dumazet1191cb82012-04-27 21:39:21 +00001546static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001547{
1548 int i;
1549
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001550 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001551 napi_enable(&bnx2x_fp(bp, i, napi));
1552}
1553
Eric Dumazet1191cb82012-04-27 21:39:21 +00001554static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001555{
1556 int i;
1557
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001558 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001559 napi_disable(&bnx2x_fp(bp, i, napi));
1560}
1561
1562void bnx2x_netif_start(struct bnx2x *bp)
1563{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001564 if (netif_running(bp->dev)) {
1565 bnx2x_napi_enable(bp);
1566 bnx2x_int_enable(bp);
1567 if (bp->state == BNX2X_STATE_OPEN)
1568 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001569 }
1570}
1571
1572void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1573{
1574 bnx2x_int_disable_sync(bp, disable_hw);
1575 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001576}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001577
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001578u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1579{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001580 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001581
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001582#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001583 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001584 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1585 u16 ether_type = ntohs(hdr->h_proto);
1586
1587 /* Skip VLAN tag if present */
1588 if (ether_type == ETH_P_8021Q) {
1589 struct vlan_ethhdr *vhdr =
1590 (struct vlan_ethhdr *)skb->data;
1591
1592 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1593 }
1594
1595 /* If ethertype is FCoE or FIP - use FCoE ring */
1596 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001597 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001598 }
1599#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001600 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001601 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001602}
1603
Dmitry Kravkov96305232012-04-03 18:41:30 +00001604
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001605void bnx2x_set_num_queues(struct bnx2x *bp)
1606{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001607 /* RSS queues */
1608 bp->num_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001609
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001610#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00001611 /* override in STORAGE SD modes */
1612 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001613 bp->num_queues = 1;
1614#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001615 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001616 bp->num_queues += NON_ETH_CONTEXT_USE;
Merav Sicron65565882012-06-19 07:48:26 +00001617
1618 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001619}
1620
David S. Miller823dcd22011-08-20 10:39:12 -07001621/**
1622 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1623 *
1624 * @bp: Driver handle
1625 *
1626 * We currently support at most 16 Tx queues for each CoS, thus we will
1627 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1628 * bp->max_cos.
1629 *
1630 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1631 * index after all ETH L2 indices.
1632 *
1633 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1634 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1635 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1636 *
1637 * The proper configuration of skb->queue_mapping is handled by
1638 * bnx2x_select_queue() and __skb_tx_hash().
1639 *
1640 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1641 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1642 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001643static int bnx2x_set_real_num_queues(struct bnx2x *bp)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001644{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001645 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001646
Merav Sicron65565882012-06-19 07:48:26 +00001647 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1648 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001649
1650/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001651#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001652 if (!NO_FCOE(bp)) {
1653 rx += FCOE_PRESENT;
1654 tx += FCOE_PRESENT;
1655 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001656#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001657
1658 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1659 if (rc) {
1660 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1661 return rc;
1662 }
1663 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1664 if (rc) {
1665 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1666 return rc;
1667 }
1668
Merav Sicron51c1a582012-03-18 10:33:38 +00001669 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001670 tx, rx);
1671
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001672 return rc;
1673}
1674
Eric Dumazet1191cb82012-04-27 21:39:21 +00001675static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001676{
1677 int i;
1678
1679 for_each_queue(bp, i) {
1680 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001681 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001682
1683 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1684 if (IS_FCOE_IDX(i))
1685 /*
1686			 * Although there are no IP frames expected to arrive on
1687			 * this ring, we still want to add an
1688 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1689 * overrun attack.
1690 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001691 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001692 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001693 mtu = bp->dev->mtu;
1694 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1695 IP_HEADER_ALIGNMENT_PADDING +
1696 ETH_OVREHEAD +
1697 mtu +
1698 BNX2X_FW_RX_ALIGN_END;
1699		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001700 }
1701}
1702
Eric Dumazet1191cb82012-04-27 21:39:21 +00001703static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001704{
1705 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001706 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1707
Dmitry Kravkov96305232012-04-03 18:41:30 +00001708	/* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001709 * enabled
1710 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001711 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1712 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001713 bp->fp->cl_id +
1714 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001715
1716 /*
1717 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1718	 * per-port, so if explicit configuration is needed, do it only
1719 * for a PMF.
1720 *
1721 * For 57712 and newer on the other hand it's a per-function
1722 * configuration.
1723 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001724 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001725}
1726
Dmitry Kravkov96305232012-04-03 18:41:30 +00001727int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001728 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001729{
Yuval Mintz3b603062012-03-18 10:33:39 +00001730 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001731 int i;
1732
1733	/* Although RSS is meaningless when there is a single HW queue, we
1734 * still need it enabled in order to have HW Rx hash generated.
1735 *
1736 * if (!is_eth_multi(bp))
1737 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1738 */
1739
Dmitry Kravkov96305232012-04-03 18:41:30 +00001740 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001741
1742 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1743
Dmitry Kravkov96305232012-04-03 18:41:30 +00001744 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001745
Dmitry Kravkov96305232012-04-03 18:41:30 +00001746 /* RSS configuration */
1747 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1748 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1749 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1750 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001751 if (rss_obj->udp_rss_v4)
1752 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1753 if (rss_obj->udp_rss_v6)
1754 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001755
Dmitry Kravkov96305232012-04-03 18:41:30 +00001756 /* Hash bits */
1757 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001758
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001759 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001760
Dmitry Kravkov96305232012-04-03 18:41:30 +00001761 if (config_hash) {
1762 /* RSS keys */
1763 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1764 params.rss_key[i] = random32();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001765
Dmitry Kravkov96305232012-04-03 18:41:30 +00001766 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001767 }
1768
1769 return bnx2x_config_rss(bp, &params);
1770}
1771
Eric Dumazet1191cb82012-04-27 21:39:21 +00001772static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001773{
Yuval Mintz3b603062012-03-18 10:33:39 +00001774 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001775
1776 /* Prepare parameters for function state transitions */
1777 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1778
1779 func_params.f_obj = &bp->func_obj;
1780 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1781
1782 func_params.params.hw_init.load_phase = load_code;
1783
1784 return bnx2x_func_state_change(bp, &func_params);
1785}
1786
1787/*
1788 * Cleans the objects that have internal lists without sending
1789 * ramrods. Should be run when interrupts are disabled.
1790 */
1791static void bnx2x_squeeze_objects(struct bnx2x *bp)
1792{
1793 int rc;
1794 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00001795 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00001796 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001797
1798 /***************** Cleanup MACs' object first *************************/
1799
1800	/* Wait for completion of the requested commands */
1801 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1802 /* Perform a dry cleanup */
1803 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1804
1805 /* Clean ETH primary MAC */
1806 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00001807 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001808 &ramrod_flags);
1809 if (rc != 0)
1810 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1811
1812 /* Cleanup UC list */
1813 vlan_mac_flags = 0;
1814 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1815 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1816 &ramrod_flags);
1817 if (rc != 0)
1818 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1819
1820 /***************** Now clean mcast object *****************************/
1821 rparam.mcast_obj = &bp->mcast_obj;
1822 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1823
1824 /* Add a DEL command... */
1825 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1826 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00001827 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1828 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001829
1830 /* ...and wait until all pending commands are cleared */
1831 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1832 while (rc != 0) {
1833 if (rc < 0) {
1834 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1835 rc);
1836 return;
1837 }
1838
1839 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1840 }
1841}
1842
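/* On a load error: mark the device as being in the error state and either
 * jump to the matching cleanup label or, in BNX2X_STOP_ON_ERROR builds,
 * set the panic flag and bail out immediately with -EBUSY.
 */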
1843#ifndef BNX2X_STOP_ON_ERROR
1844#define LOAD_ERROR_EXIT(bp, label) \
1845 do { \
1846 (bp)->state = BNX2X_STATE_ERROR; \
1847 goto label; \
1848 } while (0)
1849#else
1850#define LOAD_ERROR_EXIT(bp, label) \
1851 do { \
1852 (bp)->state = BNX2X_STATE_ERROR; \
1853 (bp)->panic = 1; \
1854 return -EBUSY; \
1855 } while (0)
1856#endif
1857
Yuval Mintz452427b2012-03-26 20:47:07 +00001858bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1859{
1860 /* build FW version dword */
1861 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1862 (BCM_5710_FW_MINOR_VERSION << 8) +
1863 (BCM_5710_FW_REVISION_VERSION << 16) +
1864 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1865
1866 /* read loaded FW from chip */
1867 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1868
1869 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1870
1871 if (loaded_fw != my_fw) {
1872 if (is_err)
1873 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1874 loaded_fw, my_fw);
1875 return false;
1876 }
1877
1878 return true;
1879}
1880
Eric Dumazet1191cb82012-04-27 21:39:21 +00001881/**
1882 * bnx2x_bz_fp - zero content of the fastpath structure.
1883 *
1884 * @bp: driver handle
1885 * @index: fastpath index to be zeroed
1886 *
1887 * Makes sure the contents of the bp->fp[index].napi are kept
1888 * intact.
1889 */
1890static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1891{
1892 struct bnx2x_fastpath *fp = &bp->fp[index];
Barak Witkowski15192a82012-06-19 07:48:28 +00001893 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1894
Merav Sicron65565882012-06-19 07:48:26 +00001895 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001896 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00001897 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001898 /* bzero bnx2x_fastpath contents */
Barak Witkowski15192a82012-06-19 07:48:28 +00001899 if (bp->stats_init) {
1900 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001901 memset(fp, 0, sizeof(*fp));
Barak Witkowski15192a82012-06-19 07:48:28 +00001902 } else {
Eric Dumazet1191cb82012-04-27 21:39:21 +00001903 /* Keep Queue statistics */
1904 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1905 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1906
1907 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1908 GFP_KERNEL);
1909 if (tmp_eth_q_stats)
Barak Witkowski15192a82012-06-19 07:48:28 +00001910 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001911 sizeof(struct bnx2x_eth_q_stats));
1912
1913 tmp_eth_q_stats_old =
1914 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1915 GFP_KERNEL);
1916 if (tmp_eth_q_stats_old)
Barak Witkowski15192a82012-06-19 07:48:28 +00001917 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001918 sizeof(struct bnx2x_eth_q_stats_old));
1919
Barak Witkowski15192a82012-06-19 07:48:28 +00001920 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001921 memset(fp, 0, sizeof(*fp));
1922
1923 if (tmp_eth_q_stats) {
Barak Witkowski15192a82012-06-19 07:48:28 +00001924 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1925 sizeof(struct bnx2x_eth_q_stats));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001926 kfree(tmp_eth_q_stats);
1927 }
1928
1929 if (tmp_eth_q_stats_old) {
Barak Witkowski15192a82012-06-19 07:48:28 +00001930 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001931 sizeof(struct bnx2x_eth_q_stats_old));
1932 kfree(tmp_eth_q_stats_old);
1933 }
1934
1935 }
1936
1937 /* Restore the NAPI object as it has been already initialized */
1938 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00001939 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001940 fp->bp = bp;
1941 fp->index = index;
1942 if (IS_ETH_FP(fp))
1943 fp->max_cos = bp->max_cos;
1944 else
1945 /* Special queues support only one CoS */
1946 fp->max_cos = 1;
1947
Merav Sicron65565882012-06-19 07:48:26 +00001948 /* Init txdata pointers */
1949#ifdef BCM_CNIC
1950 if (IS_FCOE_FP(fp))
1951 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1952#endif
1953 if (IS_ETH_FP(fp))
1954 for_each_cos_in_tx_queue(fp, cos)
1955 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1956 BNX2X_NUM_ETH_QUEUES(bp) + index];
1957
Eric Dumazet1191cb82012-04-27 21:39:21 +00001958 /*
1959 * set the tpa flag for each queue. The tpa flag determines the queue
1960 * minimal size so it must be set prior to queue memory allocation
1961 */
1962 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1963 (bp->flags & GRO_ENABLE_FLAG &&
1964 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1965 if (bp->flags & TPA_ENABLE_FLAG)
1966 fp->mode = TPA_MODE_LRO;
1967 else if (bp->flags & GRO_ENABLE_FLAG)
1968 fp->mode = TPA_MODE_GRO;
1969
1970#ifdef BCM_CNIC
1971 /* We don't want TPA on an FCoE L2 ring */
1972 if (IS_FCOE_FP(fp))
1973 fp->disable_tpa = 1;
1974#endif
1975}
1976
1977
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001978/* must be called with rtnl_lock */
1979int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1980{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001981 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001982 u32 load_code;
1983 int i, rc;
1984
1985#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00001986 if (unlikely(bp->panic)) {
1987 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001988 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00001989 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001990#endif
1991
1992 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1993
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001994 /* Set the initial link reported state to link down */
1995 bnx2x_acquire_phy_lock(bp);
1996 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1997 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1998 &bp->last_reported_link.link_report_flags);
1999 bnx2x_release_phy_lock(bp);
2000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002001 /* must be called before memory allocation and HW init */
2002 bnx2x_ilt_set_info(bp);
2003
Ariel Elior6383c0b2011-07-14 08:31:57 +00002004 /*
2005	 * Zero fastpath structures, preserving invariants like napi (which is
2006	 * allocated only once), fp index, max_cos and the bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002007 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002008 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002009 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002010 for_each_queue(bp, i)
2011 bnx2x_bz_fp(bp, i);
Merav Sicron65565882012-06-19 07:48:26 +00002012 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2013 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002014
Ariel Elior6383c0b2011-07-14 08:31:57 +00002015
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002016 /* Set the receive queues buffer size */
2017 bnx2x_set_rx_buf_size(bp);
2018
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002019 if (bnx2x_alloc_mem(bp))
2020 return -ENOMEM;
2021
2022	/* Since bnx2x_alloc_mem() may update
2023	 * bp->num_queues, bnx2x_set_real_num_queues() should always
2024	 * come after it.
2025 */
2026 rc = bnx2x_set_real_num_queues(bp);
2027 if (rc) {
2028 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002029 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002030 }
2031
Ariel Elior6383c0b2011-07-14 08:31:57 +00002032 /* configure multi cos mappings in kernel.
2033	 * this configuration may be overridden by a multi class queue discipline
2034 * or by a dcbx negotiation result.
2035 */
2036 bnx2x_setup_tc(bp->dev, bp->max_cos);
2037
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002038 bnx2x_napi_enable(bp);
2039
Ariel Elior889b9af2012-01-26 06:01:51 +00002040 /* set pf load just before approaching the MCP */
2041 bnx2x_set_pf_load(bp);
2042
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002043 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002044 * Returns the type of LOAD command:
2045 * if it is the first port to be initialized
2046 * common blocks should be initialized, otherwise - not
2047 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002048 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00002049 /* init fw_seq */
2050 bp->fw_seq =
2051 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2052 DRV_MSG_SEQ_NUMBER_MASK);
2053 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2054
2055 /* Get current FW pulse sequence */
2056 bp->fw_drv_pulse_wr_seq =
2057 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2058 DRV_PULSE_SEQ_MASK);
2059 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2060
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002061 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002062 if (!load_code) {
2063 BNX2X_ERR("MCP response failure, aborting\n");
2064 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002065 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002066 }
2067 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002068 BNX2X_ERR("Driver load refused\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002069 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002070 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002071 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00002072 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2073 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002074 /* abort nic load if version mismatch */
Yuval Mintz452427b2012-03-26 20:47:07 +00002075 if (!bnx2x_test_firmware_version(bp, true)) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002076 rc = -EBUSY;
2077 LOAD_ERROR_EXIT(bp, load_error2);
2078 }
2079 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002080
2081 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002082 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002083
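		/* no management FW - emulate its load arbitration with
		 * per-path counters: the first function on the path does
		 * COMMON init, the first one on each port does PORT init,
		 * all others do FUNCTION init only
		 */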
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002084 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2085 path, load_count[path][0], load_count[path][1],
2086 load_count[path][2]);
2087 load_count[path][0]++;
2088 load_count[path][1 + port]++;
2089 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2090 path, load_count[path][0], load_count[path][1],
2091 load_count[path][2]);
2092 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002093 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002094 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002095 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2096 else
2097 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2098 }
2099
2100 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002101 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002102 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002103 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002104 /*
2105 * We need the barrier to ensure the ordering between the
2106 * writing to bp->port.pmf here and reading it from the
2107 * bnx2x_periodic_task().
2108 */
2109 smp_mb();
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002110 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002111 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002112
Merav Sicron51c1a582012-03-18 10:33:38 +00002113 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002114
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002115 /* Init Function state controlling object */
2116 bnx2x__init_func_obj(bp);
2117
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002118 /* Initialize HW */
2119 rc = bnx2x_init_hw(bp, load_code);
2120 if (rc) {
2121 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002122 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002123 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002124 }
2125
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002126 /* Connect to IRQs */
2127 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002128 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002129 BNX2X_ERR("IRQs setup failed\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002130 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002131 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002132 }
2133
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002134 /* Setup NIC internals and enable interrupts */
2135 bnx2x_nic_init(bp, load_code);
2136
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002137 /* Init per-function objects */
2138 bnx2x_init_bp_objs(bp);
2139
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002140 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2141 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002142 (bp->common.shmem2_base)) {
2143 if (SHMEM2_HAS(bp, dcc_support))
2144 SHMEM2_WR(bp, dcc_support,
2145 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2146 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
Barak Witkowskia3348722012-04-23 03:04:46 +00002147 if (SHMEM2_HAS(bp, afex_driver_support))
2148 SHMEM2_WR(bp, afex_driver_support,
2149 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002150 }
2151
Barak Witkowskia3348722012-04-23 03:04:46 +00002152 /* Set AFEX default VLAN tag to an invalid value */
2153 bp->afex_def_vlan_tag = -1;
2154
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002155 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2156 rc = bnx2x_func_start(bp);
2157 if (rc) {
2158 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00002159 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002160 LOAD_ERROR_EXIT(bp, load_error3);
2161 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002162
2163 /* Send LOAD_DONE command to MCP */
2164 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002165 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002166 if (!load_code) {
2167 BNX2X_ERR("MCP response failure, aborting\n");
2168 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002169 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002170 }
2171 }
2172
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002173 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002174 if (rc) {
2175 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002176 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002177 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002178
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002179#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002180 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002181 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002182#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002183
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002184 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002185 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Merav Sicron51c1a582012-03-18 10:33:38 +00002186 if (rc) {
2187 BNX2X_ERR("Queue setup failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002188 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002189 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002190 }
2191
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002192 rc = bnx2x_init_rss_pf(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002193 if (rc) {
2194 BNX2X_ERR("PF RSS init failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002195 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002196 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002197
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002198	/* Now that Clients are configured we are ready to work */
2199 bp->state = BNX2X_STATE_OPEN;
2200
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002201 /* Configure a ucast MAC */
2202 rc = bnx2x_set_eth_mac(bp, true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002203 if (rc) {
2204 BNX2X_ERR("Setting Ethernet MAC failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002205 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002206 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002207
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002208 if (bp->pending_max) {
2209 bnx2x_update_max_mf_config(bp, bp->pending_max);
2210 bp->pending_max = 0;
2211 }
2212
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002213 if (bp->port.pmf)
2214 bnx2x_initial_phy_init(bp, load_mode);
2215
2216 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002217
2218 /* Initialize Rx filter. */
2219 netif_addr_lock_bh(bp->dev);
2220 bnx2x_set_rx_mode(bp->dev);
2221 netif_addr_unlock_bh(bp->dev);
2222
2223 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002224 switch (load_mode) {
2225 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002226		/* Tx queues should only be re-enabled */
2227 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002228 break;
2229
2230 case LOAD_OPEN:
2231 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002232 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002233 break;
2234
2235 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002236 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002237 bp->state = BNX2X_STATE_DIAG;
2238 break;
2239
2240 default:
2241 break;
2242 }
2243
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002244 if (bp->port.pmf)
Yuval Mintze695a2d2012-03-12 11:22:06 +00002245 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002246 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002247 bnx2x__link_status_update(bp);
2248
2249 /* start the timer */
2250 mod_timer(&bp->timer, jiffies + bp->current_interval);
2251
2252#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00002253 /* re-read iscsi info */
2254 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002255 bnx2x_setup_cnic_irq_info(bp);
Merav Sicron37ae41a2012-06-19 07:48:27 +00002256 bnx2x_setup_cnic_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002257 if (bp->state == BNX2X_STATE_OPEN)
2258 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2259#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002260
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002261 /* mark driver is loaded in shmem2 */
2262 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2263 u32 val;
2264 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2265 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2266 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2267 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2268 }
2269
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002270 /* Wait for all pending SP commands to complete */
2271 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2272 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2273 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2274 return -EBUSY;
2275 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002276
Barak Witkowski98768792012-06-19 07:48:31 +00002277 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2278 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2279 bnx2x_dcbx_init(bp, false);
2280
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002281 return 0;
2282
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002283#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002284load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002285#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002286 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002287 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002288#endif
2289load_error3:
2290 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002291
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002292 /* Clean queueable objects */
2293 bnx2x_squeeze_objects(bp);
2294
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002295 /* Free SKBs, SGEs, TPA pool and driver internals */
2296 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002297 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002298 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002299
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002300 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002301 bnx2x_free_irq(bp);
2302load_error2:
2303 if (!BP_NOMCP(bp)) {
2304 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2305 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2306 }
2307
2308 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002309load_error1:
2310 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002311 /* clear pf_load status, as it was already set */
2312 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002313load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002314 bnx2x_free_mem(bp);
2315
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002316 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002317#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002318}
2319
2320/* must be called with rtnl_lock */
2321int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2322{
2323 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002324 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002325
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002326 /* mark driver is unloaded in shmem2 */
2327 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2328 u32 val;
2329 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2330 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2331 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2332 }
2333
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002334 if ((bp->state == BNX2X_STATE_CLOSED) ||
2335 (bp->state == BNX2X_STATE_ERROR)) {
2336 /* We can get here if the driver has been unloaded
2337 * during parity error recovery and is either waiting for a
2338 * leader to complete or for other functions to unload and
2339 * then ifdown has been issued. In this case we want to
2340		 * unload and let other functions complete a recovery
2341 * process.
2342 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002343 bp->recovery_state = BNX2X_RECOVERY_DONE;
2344 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002345 bnx2x_release_leader_lock(bp);
2346 smp_mb();
2347
Merav Sicron51c1a582012-03-18 10:33:38 +00002348 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2349 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002350 return -EINVAL;
2351 }
2352
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002353 /*
2354	 * It's important to set the bp->state to a value different from
2355 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2356 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2357 */
2358 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2359 smp_mb();
2360
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002361 /* Stop Tx */
2362 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002363 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002364
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002365#ifdef BCM_CNIC
2366 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2367#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002368
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002369 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002370
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002371 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002372
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002373 /* Set ALWAYS_ALIVE bit in shmem */
2374 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2375
2376 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002377
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002378 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002379 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002380
2381 /* Cleanup the chip if needed */
2382 if (unload_mode != UNLOAD_RECOVERY)
2383 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002384 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002385 /* Send the UNLOAD_REQUEST to the MCP */
2386 bnx2x_send_unload_req(bp, unload_mode);
2387
2388 /*
2389		 * Prevent transactions to the host from the functions on the
2390		 * engine that doesn't reset global blocks in case of global
2391		 * attention once global blocks are reset and gates are opened
2392		 * (the engine whose leader will perform the recovery
2393		 * last).
2394 */
2395 if (!CHIP_IS_E1x(bp))
2396 bnx2x_pf_disable(bp);
2397
2398 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002399 bnx2x_netif_stop(bp, 1);
2400
2401 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002402 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002403
2404 /* Report UNLOAD_DONE to MCP */
2405 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002406 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002407
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002408 /*
2409	 * At this stage no more interrupts will arrive, so we may safely clean
2410 * the queueable objects here in case they failed to get cleaned so far.
2411 */
2412 bnx2x_squeeze_objects(bp);
2413
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002414 /* There should be no more pending SP commands at this stage */
2415 bp->sp_state = 0;
2416
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002417 bp->port.pmf = 0;
2418
2419 /* Free SKBs, SGEs, TPA pool and driver internals */
2420 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002421 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002422 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002423
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002424 bnx2x_free_mem(bp);
2425
2426 bp->state = BNX2X_STATE_CLOSED;
2427
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002428 /* Check if there are pending parity attentions. If there are - set
2429 * RECOVERY_IN_PROGRESS.
2430 */
2431 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2432 bnx2x_set_reset_in_progress(bp);
2433
2434 /* Set RESET_IS_GLOBAL if needed */
2435 if (global)
2436 bnx2x_set_reset_global(bp);
2437 }
2438
2439
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002440 /* The last driver must disable a "close the gate" if there is no
2441 * parity attention or "process kill" pending.
2442 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002443 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002444 bnx2x_disable_close_the_gate(bp);
2445
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002446 return 0;
2447}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002448
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002449int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2450{
2451 u16 pmcsr;
2452
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002453 /* If there is no power capability, silently succeed */
2454 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002455 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002456 return 0;
2457 }
2458
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002459 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2460
2461 switch (state) {
2462 case PCI_D0:
2463 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2464 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2465 PCI_PM_CTRL_PME_STATUS));
2466
2467 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2468 /* delay required during transition out of D3hot */
2469 msleep(20);
2470 break;
2471
2472 case PCI_D3hot:
2473		/* If there are other clients above, don't
2474 shut down the power */
2475 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2476 return 0;
2477 /* Don't shut down the power for emulation and FPGA */
2478 if (CHIP_REV_IS_SLOW(bp))
2479 return 0;
2480
2481 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
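		/* request D3hot (power state value 3) in the PM control register */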
2482 pmcsr |= 3;
2483
2484 if (bp->wol)
2485 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2486
2487 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2488 pmcsr);
2489
2490 /* No more memory access after this point until
2491 * device is brought back to D0.
2492 */
2493 break;
2494
2495 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00002496 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002497 return -EINVAL;
2498 }
2499 return 0;
2500}
2501
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002502/*
2503 * net_device service functions
2504 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002505int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002506{
2507 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002508 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002509 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2510 napi);
2511 struct bnx2x *bp = fp->bp;
2512
2513 while (1) {
2514#ifdef BNX2X_STOP_ON_ERROR
2515 if (unlikely(bp->panic)) {
2516 napi_complete(napi);
2517 return 0;
2518 }
2519#endif
2520
Ariel Elior6383c0b2011-07-14 08:31:57 +00002521 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00002522 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2523 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002524
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002525
2526 if (bnx2x_has_rx_work(fp)) {
2527 work_done += bnx2x_rx_int(fp, budget - work_done);
2528
2529 /* must not complete if we consumed full budget */
2530 if (work_done >= budget)
2531 break;
2532 }
2533
2534 /* Fall out from the NAPI loop if needed */
2535 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002536#ifdef BCM_CNIC
2537 /* No need to update SB for FCoE L2 ring as long as
2538 * it's connected to the default SB and the SB
2539 * has been updated when NAPI was scheduled.
2540 */
2541 if (IS_FCOE_FP(fp)) {
2542 napi_complete(napi);
2543 break;
2544 }
2545#endif
2546
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002547 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002548 /* bnx2x_has_rx_work() reads the status block,
2549 * thus we need to ensure that status block indices
2550 * have been actually read (bnx2x_update_fpsb_idx)
2551 * prior to this check (bnx2x_has_rx_work) so that
2552 * we won't write the "newer" value of the status block
2553 * to IGU (if there was a DMA right after
2554 * bnx2x_has_rx_work and if there is no rmb, the memory
2555 * reading (bnx2x_update_fpsb_idx) may be postponed
2556 * to right before bnx2x_ack_sb). In this case there
2557 * will never be another interrupt until there is
2558 * another update of the status block, while there
2559 * is still unhandled work.
2560 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002561 rmb();
2562
2563 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2564 napi_complete(napi);
2565 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00002566 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002567 "Update index to %d\n", fp->fp_hc_idx);
2568 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2569 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002570 IGU_INT_ENABLE, 1);
2571 break;
2572 }
2573 }
2574 }
2575
2576 return work_done;
2577}
2578
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002579/* we split the first BD into headers and data BDs
2580 * to ease the pain of our fellow microcode engineers;
2581 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002582 */
2583static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002584 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002585 struct sw_tx_bd *tx_buf,
2586 struct eth_tx_start_bd **tx_bd, u16 hlen,
2587 u16 bd_prod, int nbd)
2588{
2589 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2590 struct eth_tx_bd *d_tx_bd;
2591 dma_addr_t mapping;
2592 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2593
2594 /* first fix first BD */
2595 h_tx_bd->nbd = cpu_to_le16(nbd);
2596 h_tx_bd->nbytes = cpu_to_le16(hlen);
2597
Merav Sicron51c1a582012-03-18 10:33:38 +00002598 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2599 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002600
2601 /* now get a new data BD
2602 * (after the pbd) and fill it */
2603 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002604 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002605
2606 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2607 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2608
2609 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2610 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2611 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2612
2613 /* this marks the BD as one that has no individual mapping */
2614 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2615
2616 DP(NETIF_MSG_TX_QUEUED,
2617 "TSO split data size is %d (%x:%x)\n",
2618 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2619
2620 /* update tx_bd */
2621 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2622
2623 return bd_prod;
2624}
2625
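bnx2x_tx_split() above reuses the single DMA mapping: the start BD is shrunk to hlen bytes and a fresh data BD describes the remainder at mapping + hlen. A rough standalone illustration of that address/length arithmetic, using a hypothetical two-field descriptor rather than the real HSI structures:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical descriptor: one DMA address plus a byte count. */
struct desc {
	uint64_t addr;
	uint16_t len;
};

/* Split 'first' at 'hlen' bytes: keep the headers in it and describe
 * the remaining payload with 'second', reusing the same mapping. */
static void split_bd(struct desc *first, struct desc *second, uint16_t hlen)
{
	second->addr = first->addr + hlen;
	second->len  = first->len - hlen;
	first->len   = hlen;
}

int main(void)
{
	struct desc hdr = { .addr = 0x10000000ULL, .len = 1514 };
	struct desc data;

	split_bd(&hdr, &data, 66);	/* e.g. eth + IPv4 + TCP headers */

	printf("hdr:  addr=%#llx len=%u\n",
	       (unsigned long long)hdr.addr, hdr.len);
	printf("data: addr=%#llx len=%u\n",
	       (unsigned long long)data.addr, data.len);
	return 0;
}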
2626static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2627{
2628 if (fix > 0)
2629 csum = (u16) ~csum_fold(csum_sub(csum,
2630 csum_partial(t_header - fix, fix, 0)));
2631
2632 else if (fix < 0)
2633 csum = (u16) ~csum_fold(csum_add(csum,
2634 csum_partial(t_header, -fix, 0)));
2635
2636 return swab16(csum);
2637}
2638
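bnx2x_csum_fix() compensates for a checksum start that is 'fix' bytes away from the transport header by removing (or adding) the contribution of those bytes from the one's-complement sum. A self-contained userspace sketch of the same idea; it is simplified to work on the unfolded 32-bit accumulator, whereas the driver adjusts the already folded value with csum_sub()/csum_add():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sum big-endian 16-bit words into a 32-bit accumulator. */
static uint32_t csum_accumulate(const uint8_t *buf, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)buf[0] << 8;
	return sum;
}

/* Fold the accumulator to 16 bits and return its complement. */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[32];
	size_t i, fix = 6;	/* checksumming started 6 bytes too early */

	for (i = 0; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)(i * 7 + 3);

	uint32_t whole  = csum_accumulate(pkt, sizeof(pkt), 0);
	uint32_t prefix = csum_accumulate(pkt, fix, 0);

	/* subtracting the prefix leaves the checksum of pkt[fix..end] */
	uint16_t adjusted = csum_fold16(whole - prefix);
	uint16_t direct   = csum_fold16(csum_accumulate(pkt + fix,
							sizeof(pkt) - fix, 0));

	printf("adjusted %#06x direct %#06x\n", adjusted, direct);
	return adjusted != direct;
}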
2639static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2640{
2641 u32 rc;
2642
2643 if (skb->ip_summed != CHECKSUM_PARTIAL)
2644 rc = XMIT_PLAIN;
2645
2646 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002647 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002648 rc = XMIT_CSUM_V6;
2649 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2650 rc |= XMIT_CSUM_TCP;
2651
2652 } else {
2653 rc = XMIT_CSUM_V4;
2654 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2655 rc |= XMIT_CSUM_TCP;
2656 }
2657 }
2658
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002659 if (skb_is_gso_v6(skb))
2660 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2661 else if (skb_is_gso(skb))
2662 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002663
2664 return rc;
2665}
2666
2667#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2668/* check if packet requires linearization (packet is too fragmented)
2669 no need to check fragmentation if page size > 8K (there will be no
 2670 violation of FW restrictions) */
2671static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2672 u32 xmit_type)
2673{
2674 int to_copy = 0;
2675 int hlen = 0;
2676 int first_bd_sz = 0;
2677
2678 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2679 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2680
2681 if (xmit_type & XMIT_GSO) {
2682 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2683 /* Check if LSO packet needs to be copied:
2684 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2685 int wnd_size = MAX_FETCH_BD - 3;
2686 /* Number of windows to check */
2687 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2688 int wnd_idx = 0;
2689 int frag_idx = 0;
2690 u32 wnd_sum = 0;
2691
2692 /* Headers length */
2693 hlen = (int)(skb_transport_header(skb) - skb->data) +
2694 tcp_hdrlen(skb);
2695
2696 /* Amount of data (w/o headers) on linear part of SKB*/
2697 first_bd_sz = skb_headlen(skb) - hlen;
2698
2699 wnd_sum = first_bd_sz;
2700
2701 /* Calculate the first sum - it's special */
2702 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2703 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002704 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002705
2706 /* If there was data on linear skb data - check it */
2707 if (first_bd_sz > 0) {
2708 if (unlikely(wnd_sum < lso_mss)) {
2709 to_copy = 1;
2710 goto exit_lbl;
2711 }
2712
2713 wnd_sum -= first_bd_sz;
2714 }
2715
2716 /* Others are easier: run through the frag list and
2717 check all windows */
2718 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2719 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002720 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002721
2722 if (unlikely(wnd_sum < lso_mss)) {
2723 to_copy = 1;
2724 break;
2725 }
2726 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002727 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002728 }
2729 } else {
 2730 /* a non-LSO packet that is too fragmented should
 2731 always be linearized */
2732 to_copy = 1;
2733 }
2734 }
2735
2736exit_lbl:
2737 if (unlikely(to_copy))
2738 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00002739 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002740 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2741 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2742
2743 return to_copy;
2744}
2745#endif
2746
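The rule enforced above is easier to see on its own: with only MAX_FETCH_BD - 3 descriptors visible to the chip at a time, every window of that many consecutive buffers must together carry at least gso_size bytes, otherwise the skb has to be linearized. A standalone sketch of that sliding-window check (userspace C; the sizes, window width and MSS below are invented):

#include <stdio.h>

/* Return 1 if any window of 'wnd_size' consecutive buffers carries
 * fewer than 'mss' bytes, maintaining a running window sum much like
 * the driver's loop above. */
static int needs_linearization(const int *sizes, int n, int wnd_size, int mss)
{
	int wnd_sum = 0, i;

	if (n < wnd_size)
		return 0;

	for (i = 0; i < wnd_size; i++)
		wnd_sum += sizes[i];
	if (wnd_sum < mss)
		return 1;

	for (i = wnd_size; i < n; i++) {
		wnd_sum += sizes[i] - sizes[i - wnd_size];
		if (wnd_sum < mss)
			return 1;
	}
	return 0;
}

int main(void)
{
	int sizes[] = { 1400, 100, 80, 60, 40, 2000, 1500 };
	int n = sizeof(sizes) / sizeof(sizes[0]);

	printf("linearize: %d\n", needs_linearization(sizes, n, 3, 1460));
	return 0;
}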
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002747static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2748 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002749{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002750 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2751 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2752 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002753 if ((xmit_type & XMIT_GSO_V6) &&
2754 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002755 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002756}
2757
2758/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002759 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002760 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002761 * @skb: packet skb
2762 * @pbd: parse BD
2763 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002764 */
2765static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2766 struct eth_tx_parse_bd_e1x *pbd,
2767 u32 xmit_type)
2768{
2769 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2770 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2771 pbd->tcp_flags = pbd_tcp_flags(skb);
2772
2773 if (xmit_type & XMIT_GSO_V4) {
2774 pbd->ip_id = swab16(ip_hdr(skb)->id);
2775 pbd->tcp_pseudo_csum =
2776 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2777 ip_hdr(skb)->daddr,
2778 0, IPPROTO_TCP, 0));
2779
2780 } else
2781 pbd->tcp_pseudo_csum =
2782 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2783 &ipv6_hdr(skb)->daddr,
2784 0, IPPROTO_TCP, 0));
2785
2786 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2787}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002788
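For TSO the parse BD is seeded with a pseudo-header checksum computed over the addresses and protocol with a zero length field, so the hardware can fold in each segment's own length. A minimal userspace illustration of that IPv4/TCP pseudo-header sum (addresses are made up; note the driver actually stores the byte-swapped complement of this value):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t saddr = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t daddr = 0xc0a80002;	/* 192.168.0.2 */
	uint32_t proto = 6;		/* IPPROTO_TCP */
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;			/* zero byte + protocol */
	sum += 0;			/* length left as 0 for LSO */

	printf("pseudo-header csum (before complement): %#06x\n", fold(sum));
	return 0;
}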
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002789/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002790 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002791 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002792 * @bp: driver handle
2793 * @skb: packet skb
2794 * @parsing_data: data to be updated
2795 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002796 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002797 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002798 */
2799static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002800 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002801{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002802 *parsing_data |=
2803 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2804 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2805 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002806
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002807 if (xmit_type & XMIT_CSUM_TCP) {
2808 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2809 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2810 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002811
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002812 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2813 } else
2814 /* We support checksum offload for TCP and UDP only.
2815 * No need to pass the UDP header length - it's a constant.
2816 */
2817 return skb_transport_header(skb) +
2818 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002819}
2820
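bnx2x_set_pbd_csum_e2() packs the L4 header start (in 16-bit words) and the TCP header length (in dwords) into bit fields of a single parsing dword. A standalone sketch of that shift-and-mask packing; the field layout below is a hypothetical stand-in, the real masks come from the firmware HSI headers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout for illustration: bits 0..10 hold the L4 header
 * start in 16-bit words, bits 11..14 hold the TCP header length in
 * dwords. */
#define HDR_START_W_SHIFT	0
#define HDR_START_W_MASK	0x07ffu
#define TCP_HDR_DW_SHIFT	11
#define TCP_HDR_DW_MASK		0x7800u

int main(void)
{
	uint32_t parsing_data = 0;
	unsigned int l4_offset_bytes = 34;	/* 14 (eth) + 20 (IPv4) */
	unsigned int tcp_hdr_bytes = 20;

	parsing_data |= ((l4_offset_bytes >> 1) << HDR_START_W_SHIFT) &
			HDR_START_W_MASK;
	parsing_data |= ((tcp_hdr_bytes / 4) << TCP_HDR_DW_SHIFT) &
			TCP_HDR_DW_MASK;

	printf("parsing_data = %#010x\n", parsing_data);
	return 0;
}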
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002821static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2822 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2823{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002824 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2825
2826 if (xmit_type & XMIT_CSUM_V4)
2827 tx_start_bd->bd_flags.as_bitfield |=
2828 ETH_TX_BD_FLAGS_IP_CSUM;
2829 else
2830 tx_start_bd->bd_flags.as_bitfield |=
2831 ETH_TX_BD_FLAGS_IPV6;
2832
2833 if (!(xmit_type & XMIT_CSUM_TCP))
2834 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002835}
2836
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002837/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002838 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002839 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002840 * @bp: driver handle
2841 * @skb: packet skb
2842 * @pbd: parse BD to be updated
2843 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002844 */
2845static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2846 struct eth_tx_parse_bd_e1x *pbd,
2847 u32 xmit_type)
2848{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002849 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002850
2851 /* for now NS flag is not used in Linux */
2852 pbd->global_data =
2853 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2854 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2855
2856 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002857 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002858
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002859 hlen += pbd->ip_hlen_w;
2860
2861 /* We support checksum offload for TCP and UDP only */
2862 if (xmit_type & XMIT_CSUM_TCP)
2863 hlen += tcp_hdrlen(skb) / 2;
2864 else
2865 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002866
2867 pbd->total_hlen_w = cpu_to_le16(hlen);
2868 hlen = hlen*2;
2869
2870 if (xmit_type & XMIT_CSUM_TCP) {
2871 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2872
2873 } else {
2874 s8 fix = SKB_CS_OFF(skb); /* signed! */
2875
2876 DP(NETIF_MSG_TX_QUEUED,
2877 "hlen %d fix %d csum before fix %x\n",
2878 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2879
2880 /* HW bug: fixup the CSUM */
2881 pbd->tcp_pseudo_csum =
2882 bnx2x_csum_fix(skb_transport_header(skb),
2883 SKB_CS(skb), fix);
2884
2885 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2886 pbd->tcp_pseudo_csum);
2887 }
2888
2889 return hlen;
2890}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002891
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002892/* called with netif_tx_lock
2893 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2894 * netif_wake_queue()
2895 */
2896netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2897{
2898 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002899
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002900 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002901 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002902 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002903 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002904 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002905 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002906 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002907 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002908 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00002909 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002910 dma_addr_t mapping;
2911 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2912 int i;
2913 u8 hlen = 0;
2914 __le16 pkt_size = 0;
2915 struct ethhdr *eth;
2916 u8 mac_type = UNICAST_ADDRESS;
2917
2918#ifdef BNX2X_STOP_ON_ERROR
2919 if (unlikely(bp->panic))
2920 return NETDEV_TX_BUSY;
2921#endif
2922
Ariel Elior6383c0b2011-07-14 08:31:57 +00002923 txq_index = skb_get_queue_mapping(skb);
2924 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002925
Ariel Elior6383c0b2011-07-14 08:31:57 +00002926 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2927
Merav Sicron65565882012-06-19 07:48:26 +00002928 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002929
2930 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00002931 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002932 txq_index, fp_index, txdata_index); */
2933
Ariel Elior6383c0b2011-07-14 08:31:57 +00002934 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00002935 DP(NETIF_MSG_TX_QUEUED,
2936 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002937 txdata->cid, fp_index, txdata_index, txdata, fp); */
2938
2939 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2940 (skb_shinfo(skb)->nr_frags + 3))) {
Barak Witkowski15192a82012-06-19 07:48:28 +00002941 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002942 netif_tx_stop_queue(txq);
2943 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2944 return NETDEV_TX_BUSY;
2945 }
2946
Merav Sicron51c1a582012-03-18 10:33:38 +00002947 DP(NETIF_MSG_TX_QUEUED,
2948 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002949 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002950 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2951
2952 eth = (struct ethhdr *)skb->data;
2953
2954 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2955 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2956 if (is_broadcast_ether_addr(eth->h_dest))
2957 mac_type = BROADCAST_ADDRESS;
2958 else
2959 mac_type = MULTICAST_ADDRESS;
2960 }
2961
2962#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2963 /* First, check if we need to linearize the skb (due to FW
2964 restrictions). No need to check fragmentation if page size > 8K
2965 (there will be no violation to FW restrictions) */
2966 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2967 /* Statistics of linearization */
2968 bp->lin_cnt++;
2969 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002970 DP(NETIF_MSG_TX_QUEUED,
2971 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002972 dev_kfree_skb_any(skb);
2973 return NETDEV_TX_OK;
2974 }
2975 }
2976#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002977 /* Map skb linear data for DMA */
2978 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2979 skb_headlen(skb), DMA_TO_DEVICE);
2980 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002981 DP(NETIF_MSG_TX_QUEUED,
2982 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002983 dev_kfree_skb_any(skb);
2984 return NETDEV_TX_OK;
2985 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002986 /*
2987 Please read carefully. First we use one BD which we mark as start,
2988 then we have a parsing info BD (used for TSO or xsum),
2989 and only then we have the rest of the TSO BDs.
2990 (don't forget to mark the last one as last,
2991 and to unmap only AFTER you write to the BD ...)
 2992 And above all, all PBD sizes are in words - NOT DWORDS!
2993 */
2994
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002995 /* get current pkt produced now - advance it just before sending packet
2996 * since mapping of pages may fail and cause packet to be dropped
2997 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002998 pkt_prod = txdata->tx_pkt_prod;
2999 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003000
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003001 /* get a tx_buf and first BD
3002 * tx_start_bd may be changed during SPLIT,
3003 * but first_bd will always stay first
3004 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003005 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3006 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003007 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003008
3009 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003010 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
3011 mac_type);
3012
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003013 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003014 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003015
3016 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003017 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003018 tx_buf->skb = skb;
3019 tx_buf->flags = 0;
3020
3021 DP(NETIF_MSG_TX_QUEUED,
3022 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003023 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003024
Jesse Grosseab6d182010-10-20 13:56:03 +00003025 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003026 tx_start_bd->vlan_or_ethertype =
3027 cpu_to_le16(vlan_tx_tag_get(skb));
3028 tx_start_bd->bd_flags.as_bitfield |=
3029 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003030 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003031 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003032
3033 /* turn on parsing and get a BD */
3034 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003035
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003036 if (xmit_type & XMIT_CSUM)
3037 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003038
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003039 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003040 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003041 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3042 /* Set PBD in checksum offload case */
3043 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003044 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3045 &pbd_e2_parsing_data,
3046 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003047 if (IS_MF_SI(bp)) {
3048 /*
3049 * fill in the MAC addresses in the PBD - for local
3050 * switching
3051 */
3052 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3053 &pbd_e2->src_mac_addr_mid,
3054 &pbd_e2->src_mac_addr_lo,
3055 eth->h_source);
3056 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3057 &pbd_e2->dst_mac_addr_mid,
3058 &pbd_e2->dst_mac_addr_lo,
3059 eth->h_dest);
3060 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003061 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003062 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003063 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3064 /* Set PBD in checksum offload case */
3065 if (xmit_type & XMIT_CSUM)
3066 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003067
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003068 }
3069
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003070 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003071 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3072 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003073 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003074 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3075 pkt_size = tx_start_bd->nbytes;
3076
Merav Sicron51c1a582012-03-18 10:33:38 +00003077 DP(NETIF_MSG_TX_QUEUED,
3078 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003079 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3080 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003081 tx_start_bd->bd_flags.as_bitfield,
3082 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003083
3084 if (xmit_type & XMIT_GSO) {
3085
3086 DP(NETIF_MSG_TX_QUEUED,
3087 "TSO packet len %d hlen %d total len %d tso size %d\n",
3088 skb->len, hlen, skb_headlen(skb),
3089 skb_shinfo(skb)->gso_size);
3090
3091 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3092
3093 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003094 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3095 &tx_start_bd, hlen,
3096 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003097 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003098 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3099 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003100 else
3101 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003102 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003103
3104 /* Set the PBD's parsing_data field if not zero
3105 * (for the chips newer than 57711).
3106 */
3107 if (pbd_e2_parsing_data)
3108 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3109
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003110 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3111
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003112 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003113 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3114 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3115
Eric Dumazet9e903e02011-10-18 21:00:24 +00003116 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3117 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003118 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003119 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003120
Merav Sicron51c1a582012-03-18 10:33:38 +00003121 DP(NETIF_MSG_TX_QUEUED,
3122 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003123
3124 /* we need unmap all buffers already mapped
3125 * for this SKB;
3126 * first_bd->nbd need to be properly updated
3127 * before call to bnx2x_free_tx_pkt
3128 */
3129 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003130 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003131 TX_BD(txdata->tx_pkt_prod),
3132 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003133 return NETDEV_TX_OK;
3134 }
3135
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003136 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003137 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003138 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003139 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003140
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003141 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3142 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003143 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3144 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003145 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003146
3147 DP(NETIF_MSG_TX_QUEUED,
3148 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3149 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3150 le16_to_cpu(tx_data_bd->nbytes));
3151 }
3152
3153 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3154
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003155 /* update with actual num BDs */
3156 first_bd->nbd = cpu_to_le16(nbd);
3157
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003158 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3159
3160 /* now send a tx doorbell, counting the next BD
3161 * if the packet contains or ends with it
3162 */
3163 if (TX_BD_POFF(bd_prod) < nbd)
3164 nbd++;
3165
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003166 /* total_pkt_bytes should be set on the first data BD if
3167 * it's not an LSO packet and there is more than one
3168 * data BD. In this case pkt_size is limited by an MTU value.
3169 * However we prefer to set it for an LSO packet (while we don't
 3170 * have to) in order to save some CPU cycles in a non-LSO
 3171 * case, where we care much more about them.
3172 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003173 if (total_pkt_bd != NULL)
3174 total_pkt_bd->total_pkt_bytes = pkt_size;
3175
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003176 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003177 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003178 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003179 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3180 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3181 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3182 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003183 if (pbd_e2)
3184 DP(NETIF_MSG_TX_QUEUED,
3185 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3186 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3187 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3188 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3189 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003190 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3191
Tom Herbert2df1a702011-11-28 16:33:37 +00003192 netdev_tx_sent_queue(txq, skb->len);
3193
Willem de Bruijn8373c572012-04-27 09:04:06 +00003194 skb_tx_timestamp(skb);
3195
Ariel Elior6383c0b2011-07-14 08:31:57 +00003196 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003197 /*
3198 * Make sure that the BD data is updated before updating the producer
3199 * since FW might read the BD right after the producer is updated.
3200 * This is only applicable for weak-ordered memory model archs such
 3201 * as IA-64. The following barrier is also mandatory since the FW
 3202 * assumes packets must have BDs.
3203 */
3204 wmb();
3205
Ariel Elior6383c0b2011-07-14 08:31:57 +00003206 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003207 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003208
Ariel Elior6383c0b2011-07-14 08:31:57 +00003209 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003210
3211 mmiowb();
3212
Ariel Elior6383c0b2011-07-14 08:31:57 +00003213 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003214
Eric Dumazetbc147862012-06-13 09:45:16 +00003215 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003216 netif_tx_stop_queue(txq);
3217
3218 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3219 * ordering of set_bit() in netif_tx_stop_queue() and read of
3220 * fp->bd_tx_cons */
3221 smp_mb();
3222
Barak Witkowski15192a82012-06-19 07:48:28 +00003223 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Eric Dumazetbc147862012-06-13 09:45:16 +00003224 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003225 netif_tx_wake_queue(txq);
3226 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003227 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003228
3229 return NETDEV_TX_OK;
3230}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003231
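The stop/wake logic at the end of bnx2x_start_xmit() is a classic ring hysteresis: stop the queue while fewer than MAX_SKB_FRAGS + 4 BDs are free, and let the completion path wake it once enough descriptors come back. A toy ring-availability model of that check (userspace C; the ring size and threshold are illustrative, and the real bnx2x_tx_avail() has its own accounting):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	64		/* illustrative; power of two */
#define RING_MASK	(RING_SIZE - 1)
#define STOP_THRESH	20		/* stands in for MAX_SKB_FRAGS + 4 */

/* Free descriptors between producer and consumer, keeping one slot
 * unused so that prod == cons means "empty" - the usual ring idiom. */
static unsigned int tx_avail(uint16_t prod, uint16_t cons)
{
	return (cons - prod - 1) & RING_MASK;
}

int main(void)
{
	uint16_t prod = 0, cons = 0;
	int i;

	for (i = 0; i < 200; i++) {
		if (tx_avail(prod, cons) < STOP_THRESH) {
			printf("queue stopped at packet %d (avail=%u)\n",
			       i, tx_avail(prod, cons));
			break;
		}
		prod++;			/* one BD per fake packet */
		if (i % 3 == 0)
			cons++;		/* completions trail behind */
	}
	return 0;
}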
Ariel Elior6383c0b2011-07-14 08:31:57 +00003232/**
3233 * bnx2x_setup_tc - routine to configure net_device for multi tc
3234 *
3235 * @netdev: net device to configure
3236 * @tc: number of traffic classes to enable
3237 *
3238 * callback connected to the ndo_setup_tc function pointer
3239 */
3240int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3241{
3242 int cos, prio, count, offset;
3243 struct bnx2x *bp = netdev_priv(dev);
3244
3245 /* setup tc must be called under rtnl lock */
3246 ASSERT_RTNL();
3247
3248 /* no traffic classes requested. aborting */
3249 if (!num_tc) {
3250 netdev_reset_tc(dev);
3251 return 0;
3252 }
3253
3254 /* requested to support too many traffic classes */
3255 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003256 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3257 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003258 return -EINVAL;
3259 }
3260
3261 /* declare amount of supported traffic classes */
3262 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003263 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003264 return -EINVAL;
3265 }
3266
3267 /* configure priority to traffic class mapping */
3268 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3269 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003270 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3271 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003272 prio, bp->prio_to_cos[prio]);
3273 }
3274
3275
 3276 /* Use this configuration to differentiate tc0 from other COSes
 3277 This can be used for ets or pfc, and save the effort of setting
 3278 up a multi-class queue disc or negotiating DCBX with a switch
3279 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003280 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003281 for (prio = 1; prio < 16; prio++) {
3282 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003283 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003284 } */
3285
3286 /* configure traffic class to transmission queue mapping */
3287 for (cos = 0; cos < bp->max_cos; cos++) {
3288 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003289 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003290 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003291 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3292 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003293 cos, offset, count);
3294 }
3295
3296 return 0;
3297}
3298
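bnx2x_setup_tc() above builds a two-level mapping: skb priority to traffic class via netdev_set_prio_tc_map(), then each class is given a contiguous block of transmit queues starting at cos * BNX2X_NUM_NON_CNIC_QUEUES(bp). A toy standalone version of how those two tables compose (userspace C; the class count, queue count and priority table are invented):

#include <stdio.h>

#define NUM_ETH_Q	4	/* pretend RSS queue count */

int main(void)
{
	/* pretend priority -> traffic class table (3 classes) */
	int prio_to_tc[8] = { 0, 0, 1, 1, 2, 2, 0, 0 };
	int prio, tc;

	for (prio = 0; prio < 8; prio++) {
		tc = prio_to_tc[prio];
		/* each class owns NUM_ETH_Q queues starting at tc * NUM_ETH_Q */
		printf("prio %d -> tc %d -> queues [%d..%d]\n",
		       prio, tc, tc * NUM_ETH_Q,
		       tc * NUM_ETH_Q + NUM_ETH_Q - 1);
	}
	return 0;
}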
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003299/* called with rtnl_lock */
3300int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3301{
3302 struct sockaddr *addr = p;
3303 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003304 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003305
Merav Sicron51c1a582012-03-18 10:33:38 +00003306 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3307 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003308 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003309 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003310
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003311#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003312 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3313 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003314 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003315 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003316 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003317#endif
3318
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003319 if (netif_running(dev)) {
3320 rc = bnx2x_set_eth_mac(bp, false);
3321 if (rc)
3322 return rc;
3323 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003324
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003325 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003326 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3327
3328 if (netif_running(dev))
3329 rc = bnx2x_set_eth_mac(bp, true);
3330
3331 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003332}
3333
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003334static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3335{
3336 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3337 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003338 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003339
3340 /* Common */
3341#ifdef BCM_CNIC
3342 if (IS_FCOE_IDX(fp_index)) {
3343 memset(sb, 0, sizeof(union host_hc_status_block));
3344 fp->status_blk_mapping = 0;
3345
3346 } else {
3347#endif
3348 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003349 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003350 BNX2X_PCI_FREE(sb->e2_sb,
3351 bnx2x_fp(bp, fp_index,
3352 status_blk_mapping),
3353 sizeof(struct host_hc_status_block_e2));
3354 else
3355 BNX2X_PCI_FREE(sb->e1x_sb,
3356 bnx2x_fp(bp, fp_index,
3357 status_blk_mapping),
3358 sizeof(struct host_hc_status_block_e1x));
3359#ifdef BCM_CNIC
3360 }
3361#endif
3362 /* Rx */
3363 if (!skip_rx_queue(bp, fp_index)) {
3364 bnx2x_free_rx_bds(fp);
3365
3366 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3367 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3368 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3369 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3370 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3371
3372 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3373 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3374 sizeof(struct eth_fast_path_rx_cqe) *
3375 NUM_RCQ_BD);
3376
3377 /* SGE ring */
3378 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3379 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3380 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3381 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3382 }
3383
3384 /* Tx */
3385 if (!skip_tx_queue(bp, fp_index)) {
3386 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003387 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003388 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003389
Merav Sicron51c1a582012-03-18 10:33:38 +00003390 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003391 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003392 fp_index, cos, txdata->cid);
3393
3394 BNX2X_FREE(txdata->tx_buf_ring);
3395 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3396 txdata->tx_desc_mapping,
3397 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3398 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003399 }
3400 /* end of fastpath */
3401}
3402
3403void bnx2x_free_fp_mem(struct bnx2x *bp)
3404{
3405 int i;
3406 for_each_queue(bp, i)
3407 bnx2x_free_fp_mem_at(bp, i);
3408}
3409
Eric Dumazet1191cb82012-04-27 21:39:21 +00003410static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003411{
3412 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003413 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003414 bnx2x_fp(bp, index, sb_index_values) =
3415 (__le16 *)status_blk.e2_sb->sb.index_values;
3416 bnx2x_fp(bp, index, sb_running_index) =
3417 (__le16 *)status_blk.e2_sb->sb.running_index;
3418 } else {
3419 bnx2x_fp(bp, index, sb_index_values) =
3420 (__le16 *)status_blk.e1x_sb->sb.index_values;
3421 bnx2x_fp(bp, index, sb_running_index) =
3422 (__le16 *)status_blk.e1x_sb->sb.running_index;
3423 }
3424}
3425
Eric Dumazet1191cb82012-04-27 21:39:21 +00003426/* Returns the number of actually allocated BDs */
3427static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3428 int rx_ring_size)
3429{
3430 struct bnx2x *bp = fp->bp;
3431 u16 ring_prod, cqe_ring_prod;
3432 int i, failure_cnt = 0;
3433
3434 fp->rx_comp_cons = 0;
3435 cqe_ring_prod = ring_prod = 0;
3436
 3437 /* This routine is called only during init, so
3438 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3439 */
3440 for (i = 0; i < rx_ring_size; i++) {
3441 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3442 failure_cnt++;
3443 continue;
3444 }
3445 ring_prod = NEXT_RX_IDX(ring_prod);
3446 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3447 WARN_ON(ring_prod <= (i - failure_cnt));
3448 }
3449
3450 if (failure_cnt)
3451 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3452 i - failure_cnt, fp->index);
3453
3454 fp->rx_bd_prod = ring_prod;
3455 /* Limit the CQE producer by the CQE ring size */
3456 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3457 cqe_ring_prod);
3458 fp->rx_pkt = fp->rx_calls = 0;
3459
Barak Witkowski15192a82012-06-19 07:48:28 +00003460 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00003461
3462 return i - failure_cnt;
3463}
3464
3465static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3466{
3467 int i;
3468
3469 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3470 struct eth_rx_cqe_next_page *nextpg;
3471
3472 nextpg = (struct eth_rx_cqe_next_page *)
3473 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3474 nextpg->addr_hi =
3475 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3476 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3477 nextpg->addr_lo =
3478 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3479 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3480 }
3481}
3482
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003483static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3484{
3485 union host_hc_status_block *sb;
3486 struct bnx2x_fastpath *fp = &bp->fp[index];
3487 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003488 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003489 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003490
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003491#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003492 if (!bp->rx_ring_size &&
3493 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003494 rx_ring_size = MIN_RX_SIZE_NONTPA;
3495 bp->rx_ring_size = rx_ring_size;
3496 } else
3497#endif
David S. Miller8decf862011-09-22 03:23:13 -04003498 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003499 u32 cfg = SHMEM_RD(bp,
3500 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003501
David S. Miller8decf862011-09-22 03:23:13 -04003502 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3503
Mintz Yuvald760fc32012-02-15 02:10:28 +00003504 /* Decrease ring size for 1G functions */
3505 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3506 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3507 rx_ring_size /= 10;
3508
David S. Miller8decf862011-09-22 03:23:13 -04003509 /* allocate at least number of buffers required by FW */
3510 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3511 MIN_RX_SIZE_TPA, rx_ring_size);
3512
3513 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003514 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003515 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003516
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003517 /* Common */
3518 sb = &bnx2x_fp(bp, index, status_blk);
3519#ifdef BCM_CNIC
3520 if (!IS_FCOE_IDX(index)) {
3521#endif
3522 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003523 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003524 BNX2X_PCI_ALLOC(sb->e2_sb,
3525 &bnx2x_fp(bp, index, status_blk_mapping),
3526 sizeof(struct host_hc_status_block_e2));
3527 else
3528 BNX2X_PCI_ALLOC(sb->e1x_sb,
3529 &bnx2x_fp(bp, index, status_blk_mapping),
3530 sizeof(struct host_hc_status_block_e1x));
3531#ifdef BCM_CNIC
3532 }
3533#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003534
3535 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3536 * set shortcuts for it.
3537 */
3538 if (!IS_FCOE_IDX(index))
3539 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003540
3541 /* Tx */
3542 if (!skip_tx_queue(bp, index)) {
3543 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003544 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003545 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003546
Merav Sicron51c1a582012-03-18 10:33:38 +00003547 DP(NETIF_MSG_IFUP,
3548 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003549 index, cos);
3550
3551 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003552 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003553 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3554 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003555 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003556 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003557 }
3558
3559 /* Rx */
3560 if (!skip_rx_queue(bp, index)) {
3561 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3562 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3563 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3564 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3565 &bnx2x_fp(bp, index, rx_desc_mapping),
3566 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3567
3568 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3569 &bnx2x_fp(bp, index, rx_comp_mapping),
3570 sizeof(struct eth_fast_path_rx_cqe) *
3571 NUM_RCQ_BD);
3572
3573 /* SGE ring */
3574 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3575 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3576 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3577 &bnx2x_fp(bp, index, rx_sge_mapping),
3578 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3579 /* RX BD ring */
3580 bnx2x_set_next_page_rx_bd(fp);
3581
3582 /* CQ ring */
3583 bnx2x_set_next_page_rx_cq(fp);
3584
3585 /* BDs */
3586 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3587 if (ring_size < rx_ring_size)
3588 goto alloc_mem_err;
3589 }
3590
3591 return 0;
3592
3593/* handles low memory cases */
3594alloc_mem_err:
3595 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3596 index, ring_size);
 3597 /* FW will drop all packets if the queue is not big enough;
 3598 * in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003599 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003600 */
3601 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003602 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003603 /* release memory allocated for this queue */
3604 bnx2x_free_fp_mem_at(bp, index);
3605 return -ENOMEM;
3606 }
3607 return 0;
3608}
3609
3610int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3611{
3612 int i;
3613
3614 /**
3615 * 1. Allocate FP for leading - fatal if error
3616 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003617 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3618 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003619 */
3620
3621 /* leading */
3622 if (bnx2x_alloc_fp_mem_at(bp, 0))
3623 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003624
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003625#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003626 if (!NO_FCOE(bp))
3627 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00003628 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003629 /* we will fail load process instead of mark
3630 * NO_FCOE_FLAG
3631 */
3632 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003633#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003634
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003635 /* RSS */
3636 for_each_nondefault_eth_queue(bp, i)
3637 if (bnx2x_alloc_fp_mem_at(bp, i))
3638 break;
3639
3640 /* handle memory failures */
3641 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3642 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3643
3644 WARN_ON(delta < 0);
3645#ifdef BCM_CNIC
3646 /**
 3647 * move non-eth FPs next to the last eth FP
3648 * must be done in that order
3649 * FCOE_IDX < FWD_IDX < OOO_IDX
3650 */
3651
Ariel Elior6383c0b2011-07-14 08:31:57 +00003652 /* move FCoE fp even if NO_FCOE_FLAG is set */
Merav Sicron65565882012-06-19 07:48:26 +00003653 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003654#endif
3655 bp->num_queues -= delta;
3656 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3657 bp->num_queues + delta, bp->num_queues);
3658 }
3659
3660 return 0;
3661}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003662
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003663void bnx2x_free_mem_bp(struct bnx2x *bp)
3664{
Barak Witkowski15192a82012-06-19 07:48:28 +00003665 kfree(bp->fp->tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003666 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00003667 kfree(bp->sp_objs);
3668 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00003669 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003670 kfree(bp->msix_table);
3671 kfree(bp->ilt);
3672}
3673
3674int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3675{
3676 struct bnx2x_fastpath *fp;
3677 struct msix_entry *tbl;
3678 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003679 int msix_table_size = 0;
Barak Witkowski15192a82012-06-19 07:48:28 +00003680 int fp_array_size;
3681 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003682
Ariel Elior6383c0b2011-07-14 08:31:57 +00003683 /*
 3684 * The biggest MSI-X table we might need is the maximum number of fast
3685 * path IGU SBs plus default SB (for PF).
3686 */
3687 msix_table_size = bp->igu_sb_cnt + 1;
3688
3689 /* fp array: RSS plus CNIC related L2 queues */
Barak Witkowski15192a82012-06-19 07:48:28 +00003690 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3691 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3692
3693 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003694 if (!fp)
3695 goto alloc_err;
Barak Witkowski15192a82012-06-19 07:48:28 +00003696 for (i = 0; i < fp_array_size; i++) {
3697 fp[i].tpa_info =
3698 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3699 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3700 if (!(fp[i].tpa_info))
3701 goto alloc_err;
3702 }
3703
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003704 bp->fp = fp;
3705
Barak Witkowski15192a82012-06-19 07:48:28 +00003706 /* allocate sp objs */
3707 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3708 GFP_KERNEL);
3709 if (!bp->sp_objs)
3710 goto alloc_err;
3711
3712 /* allocate fp_stats */
3713 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3714 GFP_KERNEL);
3715 if (!bp->fp_stats)
3716 goto alloc_err;
3717
Merav Sicron65565882012-06-19 07:48:26 +00003718 /* Allocate memory for the transmission queues array */
3719 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3720#ifdef BCM_CNIC
3721 bp->bnx2x_txq_size++;
3722#endif
3723 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3724 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3725 if (!bp->bnx2x_txq)
3726 goto alloc_err;
3727
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003728 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003729 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003730 if (!tbl)
3731 goto alloc_err;
3732 bp->msix_table = tbl;
3733
3734 /* ilt */
3735 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3736 if (!ilt)
3737 goto alloc_err;
3738 bp->ilt = ilt;
3739
3740 return 0;
3741alloc_err:
3742 bnx2x_free_mem_bp(bp);
3743 return -ENOMEM;
3744
3745}
3746
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003747int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003748{
3749 struct bnx2x *bp = netdev_priv(dev);
3750
3751 if (unlikely(!netif_running(dev)))
3752 return 0;
3753
3754 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3755 return bnx2x_nic_load(bp, LOAD_NORMAL);
3756}
3757
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003758int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3759{
3760 u32 sel_phy_idx = 0;
3761 if (bp->link_params.num_phys <= 1)
3762 return INT_PHY;
3763
3764 if (bp->link_vars.link_up) {
3765 sel_phy_idx = EXT_PHY1;
3766 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3767 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3768 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3769 sel_phy_idx = EXT_PHY2;
3770 } else {
3771
3772 switch (bnx2x_phy_selection(&bp->link_params)) {
3773 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3774 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3775 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3776 sel_phy_idx = EXT_PHY1;
3777 break;
3778 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3779 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3780 sel_phy_idx = EXT_PHY2;
3781 break;
3782 }
3783 }
3784
3785 return sel_phy_idx;
3786
3787}
3788int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3789{
3790 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3791 /*
 3792 * The selected active PHY is always reported after swapping (in case
 3793 * PHY swapping is enabled), so when swapping is enabled we need to
 3794 * reverse the configuration.
3795 */
3796
3797 if (bp->link_params.multi_phy_config &
3798 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3799 if (sel_phy_idx == EXT_PHY1)
3800 sel_phy_idx = EXT_PHY2;
3801 else if (sel_phy_idx == EXT_PHY2)
3802 sel_phy_idx = EXT_PHY1;
3803 }
3804 return LINK_CONFIG_IDX(sel_phy_idx);
3805}
3806
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003807#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3808int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3809{
3810 struct bnx2x *bp = netdev_priv(dev);
3811 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3812
3813 switch (type) {
3814 case NETDEV_FCOE_WWNN:
3815 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3816 cp->fcoe_wwn_node_name_lo);
3817 break;
3818 case NETDEV_FCOE_WWPN:
3819 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3820 cp->fcoe_wwn_port_name_lo);
3821 break;
3822 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003823 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003824 return -EINVAL;
3825 }
3826
3827 return 0;
3828}
3829#endif
3830
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003831/* called with rtnl_lock */
3832int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3833{
3834 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003835
3836 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003837 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003838 return -EAGAIN;
3839 }
3840
3841 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00003842 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3843 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003844 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003845 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003846
3847 /* This does not race with packet allocation
3848 * because the actual alloc size is
3849 * only updated as part of load
3850 */
3851 dev->mtu = new_mtu;
3852
Michał Mirosław66371c42011-04-12 09:38:23 +00003853 return bnx2x_reload_if_running(dev);
3854}
3855
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003856netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003857 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003858{
3859 struct bnx2x *bp = netdev_priv(dev);
3860
3861 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003862 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003863 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003864 features &= ~NETIF_F_GRO;
3865 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003866
3867 return features;
3868}
3869
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003870int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003871{
3872 struct bnx2x *bp = netdev_priv(dev);
3873 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003874 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003875
3876 if (features & NETIF_F_LRO)
3877 flags |= TPA_ENABLE_FLAG;
3878 else
3879 flags &= ~TPA_ENABLE_FLAG;
3880
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003881 if (features & NETIF_F_GRO)
3882 flags |= GRO_ENABLE_FLAG;
3883 else
3884 flags &= ~GRO_ENABLE_FLAG;
3885
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003886 if (features & NETIF_F_LOOPBACK) {
3887 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3888 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3889 bnx2x_reload = true;
3890 }
3891 } else {
3892 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3893 bp->link_params.loopback_mode = LOOPBACK_NONE;
3894 bnx2x_reload = true;
3895 }
3896 }
3897
Michał Mirosław66371c42011-04-12 09:38:23 +00003898 if (flags ^ bp->flags) {
3899 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003900 bnx2x_reload = true;
3901 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003902
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003903 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003904 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3905 return bnx2x_reload_if_running(dev);
3906 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003907 }
3908
Michał Mirosław66371c42011-04-12 09:38:23 +00003909 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003910}
3911
3912void bnx2x_tx_timeout(struct net_device *dev)
3913{
3914 struct bnx2x *bp = netdev_priv(dev);
3915
3916#ifdef BNX2X_STOP_ON_ERROR
3917 if (!bp->panic)
3918 bnx2x_panic();
3919#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003920
3921 smp_mb__before_clear_bit();
3922 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3923 smp_mb__after_clear_bit();
3924
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003925 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003926 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003927}
3928
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003929int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3930{
3931 struct net_device *dev = pci_get_drvdata(pdev);
3932 struct bnx2x *bp;
3933
3934 if (!dev) {
3935 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3936 return -ENODEV;
3937 }
3938 bp = netdev_priv(dev);
3939
3940 rtnl_lock();
3941
3942 pci_save_state(pdev);
3943
3944 if (!netif_running(dev)) {
3945 rtnl_unlock();
3946 return 0;
3947 }
3948
3949 netif_device_detach(dev);
3950
3951 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3952
3953 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3954
3955 rtnl_unlock();
3956
3957 return 0;
3958}
3959
3960int bnx2x_resume(struct pci_dev *pdev)
3961{
3962 struct net_device *dev = pci_get_drvdata(pdev);
3963 struct bnx2x *bp;
3964 int rc;
3965
3966 if (!dev) {
3967 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3968 return -ENODEV;
3969 }
3970 bp = netdev_priv(dev);
3971
3972 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003973 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003974 return -EAGAIN;
3975 }
3976
3977 rtnl_lock();
3978
3979 pci_restore_state(pdev);
3980
3981 if (!netif_running(dev)) {
3982 rtnl_unlock();
3983 return 0;
3984 }
3985
3986 bnx2x_set_power_state(bp, PCI_D0);
3987 netif_device_attach(dev);
3988
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003989 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3990
3991 rtnl_unlock();
3992
3993 return rc;
3994}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003995
3996
3997void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3998 u32 cid)
3999{
4000 /* ustorm cxt validation */
4001 cxt->ustorm_ag_context.cdu_usage =
4002 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4003 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4004 /* xcontext validation */
4005 cxt->xstorm_ag_context.cdu_reserved =
4006 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4007 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4008}
4009
Eric Dumazet1191cb82012-04-27 21:39:21 +00004010static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4011 u8 fw_sb_id, u8 sb_index,
4012 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004013{
4014
4015 u32 addr = BAR_CSTRORM_INTMEM +
4016 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4017 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004018 DP(NETIF_MSG_IFUP,
4019 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4020 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004021}
4022
Eric Dumazet1191cb82012-04-27 21:39:21 +00004023static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4024 u16 fw_sb_id, u8 sb_index,
4025 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004026{
4027 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4028 u32 addr = BAR_CSTRORM_INTMEM +
4029 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4030 u16 flags = REG_RD16(bp, addr);
4031 /* clear and set */
4032 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4033 flags |= enable_flag;
4034 REG_WR16(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004035 DP(NETIF_MSG_IFUP,
4036 "port %x fw_sb_id %d sb_index %d disable %d\n",
4037 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004038}
4039
4040void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4041 u8 sb_index, u8 disable, u16 usec)
4042{
4043 int port = BP_PORT(bp);
4044 u8 ticks = usec / BNX2X_BTR;
4045
4046 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4047
4048 disable = disable ? 1 : (usec ? 0 : 1);
4049 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4050}