/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"


/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpying the entire
 * source onto the target. Updates txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[old_txdata_index],
	       &bp->bnx2x_txq[new_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);


	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

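	/* Compare via a signed 16-bit subtraction so the update stays
	 * correct when the SGE index wraps around its 16-bit counter.
	 */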
	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
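		/* full_page: SGE capacity rounded down to a whole number of
		 * gro_size chunks.
		 */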
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 * nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation, calculated using
 * its first packet.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);


	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
						PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}


#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
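			/* A single SGE page backs several gro_size-sized
			 * frags; take an extra page reference for every frag
			 * after the first so the page isn't freed while the
			 * skb still points into it.
			 */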
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
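	/* The skb is built around the buffer that accumulated the
	 * aggregation ('data'); the freshly allocated 'new_data' replaces
	 * it in the TPA bin below.
	 */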
	if (likely(new_data))
		skb = build_skb(data, 0);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no IP/L4 csum validation was done */

	if (cqe->fast_path_cqe.status_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
		return;

	/* If both IP/L4 validation were done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

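			/* Work out how much SGE space the aggregated frags
			 * occupy: GRO bins are filled in full_page-sized
			 * chunks, LRO in whole SGE pages.
			 */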
781 if (fp->mode == TPA_MODE_GRO)
782 pages = (frag_size + tpa_info->full_page - 1) /
783 tpa_info->full_page;
784 else
785 pages = SGE_PAGE_ALIGN(frag_size) >>
786 SGE_PAGE_SHIFT;
787
788 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
789 &cqe->end_agg_cqe, comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000790#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000791 if (bp->panic)
792 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000793#endif
794
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000795 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
796 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000797 }
798 /* non TPA */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000799 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000800 pad = cqe_fp->placement_offset;
801 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000802 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +0000803 pad + RX_COPY_THRESH,
804 DMA_FROM_DEVICE);
805 pad += NET_SKB_PAD;
806 prefetch(data + pad); /* speedup eth_type_trans() */
807 /* is this an error packet? */
808 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000809 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000810 "ERROR flags %x rx packet %u\n",
811 cqe_fp_flags, sw_comp_cons);
Barak Witkowski15192a82012-06-19 07:48:28 +0000812 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000813 goto reuse_rx;
814 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000815
Eric Dumazete52fcb22011-11-14 06:05:34 +0000816 /* Since we don't have a jumbo ring
817 * copy small packets if mtu > 1500
818 */
819 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
820 (len <= RX_COPY_THRESH)) {
821 skb = netdev_alloc_skb_ip_align(bp->dev, len);
822 if (skb == NULL) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000823 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000824 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +0000825 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000826 goto reuse_rx;
827 }
Eric Dumazete52fcb22011-11-14 06:05:34 +0000828 memcpy(skb->data, data + pad, len);
829 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
830 } else {
831 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000832 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000833 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800834 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000835 DMA_FROM_DEVICE);
Eric Dumazetd3836f22012-04-27 00:33:38 +0000836 skb = build_skb(data, 0);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000837 if (unlikely(!skb)) {
838 kfree(data);
Barak Witkowski15192a82012-06-19 07:48:28 +0000839 bnx2x_fp_qstats(bp, fp)->
840 rx_skb_alloc_failed++;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000841 goto next_rx;
842 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000843 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000844 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +0000845 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
846 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +0000847 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000848reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000849 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000850 goto next_rx;
851 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000852 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000853
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000854 skb_put(skb, len);
855 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000856
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000857 /* Set Toeplitz hash for a none-LRO skb */
858 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000859
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000860 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000861
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000862 if (bp->dev->features & NETIF_F_RXCSUM)
Barak Witkowski15192a82012-06-19 07:48:28 +0000863 bnx2x_csum_validate(skb, cqe, fp,
864 bnx2x_fp_qstats(bp, fp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000865
Dmitry Kravkovf233caf2011-11-13 04:34:22 +0000866 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000867
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300868 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
869 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000870 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300871 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000872 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000873
874
875next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000876 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000877
878 bd_cons = NEXT_RX_IDX(bd_cons);
879 bd_prod = NEXT_RX_IDX(bd_prod);
880 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
881 rx_pkt++;
882next_cqe:
883 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
884 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
885
886 if (rx_pkt == budget)
887 break;
888 } /* while */
889
890 fp->rx_bd_cons = bd_cons;
891 fp->rx_bd_prod = bd_prod_fw;
892 fp->rx_comp_cons = sw_comp_cons;
893 fp->rx_comp_prod = sw_comp_prod;
894
895 /* Update producers */
896 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
897 fp->rx_sge_prod);
898
899 fp->rx_pkt += rx_pkt;
900 fp->rx_calls++;
901
902 return rx_pkt;
903}
904
905static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
906{
907 struct bnx2x_fastpath *fp = fp_cookie;
908 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000909 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000910
Merav Sicron51c1a582012-03-18 10:33:38 +0000911 DP(NETIF_MSG_INTR,
912 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000913 fp->index, fp->fw_sb_id, fp->igu_sb_id);
914 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000915
916#ifdef BNX2X_STOP_ON_ERROR
917 if (unlikely(bp->panic))
918 return IRQ_HANDLED;
919#endif
920
921 /* Handle Rx and Tx according to MSI-X vector */
922 prefetch(fp->rx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000923
924 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +0000925 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000926
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000927 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000928 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
929
930 return IRQ_HANDLED;
931}
932
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000933/* HW Lock for shared dual port PHYs */
934void bnx2x_acquire_phy_lock(struct bnx2x *bp)
935{
936 mutex_lock(&bp->port.phy_mutex);
937
938 if (bp->port.need_hw_lock)
939 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
940}
941
942void bnx2x_release_phy_lock(struct bnx2x *bp)
943{
944 if (bp->port.need_hw_lock)
945 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
946
947 mutex_unlock(&bp->port.phy_mutex);
948}
949
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800950/* calculates MF speed according to current linespeed and MF configuration */
951u16 bnx2x_get_mf_speed(struct bnx2x *bp)
952{
953 u16 line_speed = bp->link_vars.line_speed;
954 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000955 u16 maxCfg = bnx2x_extract_max_cfg(bp,
956 bp->mf_config[BP_VN(bp)]);
957
958 /* Calculate the current MAX line speed limit for the MF
959 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800960 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000961 if (IS_MF_SI(bp))
962 line_speed = (line_speed * maxCfg) / 100;
963 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800964 u16 vn_max_rate = maxCfg * 100;
965
966 if (vn_max_rate < line_speed)
967 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000968 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800969 }
970
971 return line_speed;
972}
973
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000974/**
975 * bnx2x_fill_report_data - fill link report data to report
976 *
977 * @bp: driver handle
978 * @data: link state to update
979 *
980 * It uses a none-atomic bit operations because is called under the mutex.
981 */
Eric Dumazet1191cb82012-04-27 21:39:21 +0000982static void bnx2x_fill_report_data(struct bnx2x *bp,
983 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000984{
985 u16 line_speed = bnx2x_get_mf_speed(bp);
986
987 memset(data, 0, sizeof(*data));
988
989 /* Fill the report data: efective line speed */
990 data->line_speed = line_speed;
991
992 /* Link is down */
993 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
994 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
995 &data->link_report_flags);
996
997 /* Full DUPLEX */
998 if (bp->link_vars.duplex == DUPLEX_FULL)
999 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1000
1001 /* Rx Flow Control is ON */
1002 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1003 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1004
1005 /* Tx Flow Control is ON */
1006 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1007 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1008}
1009
1010/**
1011 * bnx2x_link_report - report link status to OS.
1012 *
1013 * @bp: driver handle
1014 *
1015 * Calls the __bnx2x_link_report() under the same locking scheme
1016 * as a link/PHY state managing code to ensure a consistent link
1017 * reporting.
1018 */
1019
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001020void bnx2x_link_report(struct bnx2x *bp)
1021{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001022 bnx2x_acquire_phy_lock(bp);
1023 __bnx2x_link_report(bp);
1024 bnx2x_release_phy_lock(bp);
1025}
1026
1027/**
1028 * __bnx2x_link_report - report link status to OS.
1029 *
1030 * @bp: driver handle
1031 *
1032 * None atomic inmlementation.
1033 * Should be called under the phy_lock.
1034 */
1035void __bnx2x_link_report(struct bnx2x *bp)
1036{
1037 struct bnx2x_link_report_data cur_data;
1038
1039 /* reread mf_cfg */
1040 if (!CHIP_IS_E1(bp))
1041 bnx2x_read_mf_cfg(bp);
1042
1043 /* Read the current link report info */
1044 bnx2x_fill_report_data(bp, &cur_data);
1045
1046 /* Don't report link down or exactly the same link status twice */
1047 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1048 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1049 &bp->last_reported_link.link_report_flags) &&
1050 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1051 &cur_data.link_report_flags)))
1052 return;
1053
1054 bp->link_cnt++;
1055
1056 /* We are going to report a new link parameters now -
1057 * remember the current data for the next time.
1058 */
1059 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1060
1061 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1062 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001063 netif_carrier_off(bp->dev);
1064 netdev_err(bp->dev, "NIC Link is Down\n");
1065 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001066 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001067 const char *duplex;
1068 const char *flow;
1069
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001070 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001071
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001072 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1073 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001074 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001075 else
Joe Perches94f05b02011-08-14 12:16:20 +00001076 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001077
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001078 /* Handle the FC at the end so that only these flags would be
1079 * possibly set. This way we may easily check if there is no FC
1080 * enabled.
1081 */
1082 if (cur_data.link_report_flags) {
1083 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1084 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001085 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1086 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001087 flow = "ON - receive & transmit";
1088 else
1089 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001090 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001091 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001092 }
Joe Perches94f05b02011-08-14 12:16:20 +00001093 } else {
1094 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001095 }
Joe Perches94f05b02011-08-14 12:16:20 +00001096 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1097 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001098 }
1099}
1100
Eric Dumazet1191cb82012-04-27 21:39:21 +00001101static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1102{
1103 int i;
1104
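	/* The last two entries of each SGE ring page are reserved as a
	 * "next page" pointer; link every page to the one after it, with
	 * the last page wrapping back to the first.
	 */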
	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev,
						    txdata->txq_index));
		}
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

Dmitry Kravkovca924292011-06-14 01:33:08 +00001331 if (nvecs == offset)
1332 return;
1333 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001334 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001335 bp->msix_table[offset].vector);
1336 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001337#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001338 if (nvecs == offset)
1339 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001340 offset++;
1341#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001342
Dmitry Kravkovca924292011-06-14 01:33:08 +00001343 for_each_eth_queue(bp, i) {
1344 if (nvecs == offset)
1345 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001346 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1347 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001348
1349 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001350 }
1351}
1352
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001353void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001354{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001355 if (bp->flags & USING_MSIX_FLAG &&
1356 !(bp->flags & USING_SINGLE_MSIX_FLAG))
Dmitry Kravkovca924292011-06-14 01:33:08 +00001357 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001358 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001359 else
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001360 free_irq(bp->dev->irq, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001361}
1362
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001363int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001364{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001365 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001366
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001367 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001368 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001369 bp->msix_table[0].entry);
1370 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001371
1372#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001373 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001374 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001375 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1376 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001377#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001378 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001379 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001380 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001381 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1382 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001383 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001384 }
1385
Ariel Elior6383c0b2011-07-14 08:31:57 +00001386 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001387
1388 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001389
1390 /*
1391 * reconfigure number of tx/rx queues according to available
1392 * MSI-X vectors
1393 */
1394 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001395		/* how many fewer vectors will we have? */
1396 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001397
Merav Sicron51c1a582012-03-18 10:33:38 +00001398 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001399
1400 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1401
1402 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001403 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1404 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001405 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001406 /*
 1407		 * decrease the number of queues by the number of unallocated entries
1408 */
1409 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001410
Merav Sicron51c1a582012-03-18 10:33:38 +00001411 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001412 bp->num_queues);
1413 } else if (rc > 0) {
1414 /* Get by with single vector */
1415 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1416 if (rc) {
1417 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1418 rc);
1419 goto no_msix;
1420 }
1421
1422 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1423 bp->flags |= USING_SINGLE_MSIX_FLAG;
1424
1425 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001426 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001427 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001428 }
1429
1430 bp->flags |= USING_MSIX_FLAG;
1431
1432 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001433
1434no_msix:
 1435	/* fall back to INTx if not enough memory */
1436 if (rc == -ENOMEM)
1437 bp->flags |= DISABLE_MSI_FLAG;
1438
1439 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001440}
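
/*
 * A minimal stand-alone sketch of the fallback strategy above, with a stubbed
 * allocator in place of pci_enable_msix(): request the full vector count,
 * retry with however many the old-style call reports as available, and
 * finally try a single vector.  fake_enable_msix() and example_enable_msix()
 * are hypothetical and only mimic the "0 = success, >0 = that many vectors
 * available, <0 = error" convention the driver relies on.
 */
#include <stdio.h>

#define FAKE_AVAILABLE_VECS 5	/* pretend the platform offers 5 vectors */

static int fake_enable_msix(int requested)
{
	if (requested <= FAKE_AVAILABLE_VECS)
		return 0;			/* success */
	return FAKE_AVAILABLE_VECS;		/* only this many are available */
}

static int example_enable_msix(int req_cnt, int min_cnt)
{
	int rc = fake_enable_msix(req_cnt);

	if (rc >= min_cnt) {
		/* retry with the number of vectors the call said we can get */
		rc = fake_enable_msix(rc);
	} else if (rc > 0) {
		/* too few for multi-queue operation - try a single vector */
		rc = fake_enable_msix(1);
	}
	return rc;	/* 0 on success, otherwise fall back to MSI/INTx */
}

int main(void)
{
	printf("rc = %d\n", example_enable_msix(9, 2));	/* prints rc = 0 */
	return 0;
}
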
1441
1442static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1443{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001444 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001445
Dmitry Kravkovca924292011-06-14 01:33:08 +00001446 rc = request_irq(bp->msix_table[offset++].vector,
1447 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001448 bp->dev->name, bp->dev);
1449 if (rc) {
1450 BNX2X_ERR("request sp irq failed\n");
1451 return -EBUSY;
1452 }
1453
1454#ifdef BCM_CNIC
1455 offset++;
1456#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001457 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001458 struct bnx2x_fastpath *fp = &bp->fp[i];
1459 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1460 bp->dev->name, i);
1461
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001462 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001463 bnx2x_msix_fp_int, 0, fp->name, fp);
1464 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001465 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1466 bp->msix_table[offset].vector, rc);
1467 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001468 return -EBUSY;
1469 }
1470
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001471 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001472 }
1473
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001474 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001475 offset = 1 + CNIC_PRESENT;
Merav Sicron51c1a582012-03-18 10:33:38 +00001476 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001477 bp->msix_table[0].vector,
1478 0, bp->msix_table[offset].vector,
1479 i - 1, bp->msix_table[offset + i - 1].vector);
1480
1481 return 0;
1482}
1483
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001484int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001485{
1486 int rc;
1487
1488 rc = pci_enable_msi(bp->pdev);
1489 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001490 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001491 return -1;
1492 }
1493 bp->flags |= USING_MSI_FLAG;
1494
1495 return 0;
1496}
1497
1498static int bnx2x_req_irq(struct bnx2x *bp)
1499{
1500 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001501 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001502
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001503 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001504 flags = 0;
1505 else
1506 flags = IRQF_SHARED;
1507
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001508 if (bp->flags & USING_MSIX_FLAG)
1509 irq = bp->msix_table[0].vector;
1510 else
1511 irq = bp->pdev->irq;
1512
1513 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001514}
1515
Eric Dumazet1191cb82012-04-27 21:39:21 +00001516static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001517{
1518 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001519 if (bp->flags & USING_MSIX_FLAG &&
1520 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001521 rc = bnx2x_req_msix_irqs(bp);
1522 if (rc)
1523 return rc;
1524 } else {
1525 bnx2x_ack_int(bp);
1526 rc = bnx2x_req_irq(bp);
1527 if (rc) {
1528 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1529 return rc;
1530 }
1531 if (bp->flags & USING_MSI_FLAG) {
1532 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001533 netdev_info(bp->dev, "using MSI IRQ %d\n",
1534 bp->dev->irq);
1535 }
1536 if (bp->flags & USING_MSIX_FLAG) {
1537 bp->dev->irq = bp->msix_table[0].vector;
1538 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1539 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001540 }
1541 }
1542
1543 return 0;
1544}
1545
Eric Dumazet1191cb82012-04-27 21:39:21 +00001546static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001547{
1548 int i;
1549
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001550 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001551 napi_enable(&bnx2x_fp(bp, i, napi));
1552}
1553
Eric Dumazet1191cb82012-04-27 21:39:21 +00001554static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001555{
1556 int i;
1557
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001558 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001559 napi_disable(&bnx2x_fp(bp, i, napi));
1560}
1561
1562void bnx2x_netif_start(struct bnx2x *bp)
1563{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001564 if (netif_running(bp->dev)) {
1565 bnx2x_napi_enable(bp);
1566 bnx2x_int_enable(bp);
1567 if (bp->state == BNX2X_STATE_OPEN)
1568 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001569 }
1570}
1571
1572void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1573{
1574 bnx2x_int_disable_sync(bp, disable_hw);
1575 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001576}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001577
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001578u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1579{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001580 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001581
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001582#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001583 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001584 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1585 u16 ether_type = ntohs(hdr->h_proto);
1586
1587 /* Skip VLAN tag if present */
1588 if (ether_type == ETH_P_8021Q) {
1589 struct vlan_ethhdr *vhdr =
1590 (struct vlan_ethhdr *)skb->data;
1591
1592 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1593 }
1594
1595 /* If ethertype is FCoE or FIP - use FCoE ring */
1596 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001597 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001598 }
1599#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001600 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001601 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001602}
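
/*
 * A stand-alone sketch of the "skip the VLAN tag if present" step above,
 * operating on a raw frame buffer instead of skb and vlan_ethhdr structures.
 * The offsets (outer ethertype at byte 12, inner ethertype at byte 16 when a
 * single 802.1Q tag is present) follow the standard Ethernet layout; the
 * example_* names are made up for the illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_ETH_P_8021Q 0x8100

static uint16_t example_get_ethertype(const uint8_t *frame)
{
	uint16_t ether_type = (frame[12] << 8) | frame[13];

	if (ether_type == EXAMPLE_ETH_P_8021Q)	/* skip one VLAN tag */
		ether_type = (frame[16] << 8) | frame[17];

	return ether_type;
}

int main(void)
{
	uint8_t frame[18] = { [12] = 0x81, [13] = 0x00,		/* 802.1Q tag */
			      [16] = 0x08, [17] = 0x00 };	/* inner IPv4 */

	printf("0x%04x\n", example_get_ethertype(frame));	/* 0x0800 */
	return 0;
}
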
1603
Dmitry Kravkov96305232012-04-03 18:41:30 +00001604
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001605void bnx2x_set_num_queues(struct bnx2x *bp)
1606{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001607 /* RSS queues */
1608 bp->num_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001609
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001610#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00001611 /* override in STORAGE SD modes */
1612 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001613 bp->num_queues = 1;
1614#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001615 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001616 bp->num_queues += NON_ETH_CONTEXT_USE;
Merav Sicron65565882012-06-19 07:48:26 +00001617
1618 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001619}
1620
David S. Miller823dcd22011-08-20 10:39:12 -07001621/**
1622 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1623 *
1624 * @bp: Driver handle
1625 *
1626 * We currently support for at most 16 Tx queues for each CoS thus we will
1627 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1628 * bp->max_cos.
1629 *
1630 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1631 * index after all ETH L2 indices.
1632 *
1633 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1634 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1635 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1636 *
1637 * The proper configuration of skb->queue_mapping is handled by
1638 * bnx2x_select_queue() and __skb_tx_hash().
1639 *
1640 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1641 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1642 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001643static int bnx2x_set_real_num_queues(struct bnx2x *bp)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001644{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001645 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001646
Merav Sicron65565882012-06-19 07:48:26 +00001647 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1648 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001649
1650/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001651#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001652 if (!NO_FCOE(bp)) {
1653 rx += FCOE_PRESENT;
1654 tx += FCOE_PRESENT;
1655 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001656#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001657
1658 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1659 if (rc) {
1660 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1661 return rc;
1662 }
1663 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1664 if (rc) {
1665 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1666 return rc;
1667 }
1668
Merav Sicron51c1a582012-03-18 10:33:38 +00001669 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001670 tx, rx);
1671
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001672 return rc;
1673}
1674
Eric Dumazet1191cb82012-04-27 21:39:21 +00001675static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001676{
1677 int i;
1678
1679 for_each_queue(bp, i) {
1680 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001681 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001682
1683 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1684 if (IS_FCOE_IDX(i))
1685 /*
 1686			 * Although there are no IP frames expected to arrive on
1687 * this ring we still want to add an
1688 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1689 * overrun attack.
1690 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001691 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001692 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001693 mtu = bp->dev->mtu;
1694 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1695 IP_HEADER_ALIGNMENT_PADDING +
1696 ETH_OVREHEAD +
1697 mtu +
1698 BNX2X_FW_RX_ALIGN_END;
 1699		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001700 }
1701}
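
/*
 * A rough worked example of the sum above.  The constants here are
 * placeholders only; the real BNX2X_FW_RX_ALIGN_START/END,
 * IP_HEADER_ALIGNMENT_PADDING and ETH_OVREHEAD values are defined in the
 * driver headers and will differ.
 */
#include <stdio.h>

int main(void)
{
	unsigned int align_start = 64, ip_pad = 2, eth_overhead = 18;
	unsigned int mtu = 1500, align_end = 64;

	/* 64 + 2 + 18 + 1500 + 64 = 1648 bytes per receive buffer */
	printf("rx_buf_size = %u\n",
	       align_start + ip_pad + eth_overhead + mtu + align_end);
	return 0;
}
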
1702
Eric Dumazet1191cb82012-04-27 21:39:21 +00001703static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001704{
1705 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001706 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1707
Dmitry Kravkov96305232012-04-03 18:41:30 +00001708	/* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001709 * enabled
1710 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001711 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1712 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001713 bp->fp->cl_id +
1714 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001715
1716 /*
1717 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1718	 * per-port, so if explicit configuration is needed, do it only
1719 * for a PMF.
1720 *
1721 * For 57712 and newer on the other hand it's a per-function
1722 * configuration.
1723 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001724 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001725}
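
/*
 * A stand-alone sketch of the round-robin seeding above.  It assumes the
 * usual ethtool_rxfh_indir_default() behaviour of "index % n_rx_rings" and
 * adds a base client id the way the loop adds bp->fp->cl_id; the table size
 * and ids below are made up (the real table has
 * sizeof(bp->rss_conf_obj.ind_table) entries).
 */
#include <stdio.h>

#define EXAMPLE_IND_TABLE_SIZE 16

int main(void)
{
	unsigned char ind_table[EXAMPLE_IND_TABLE_SIZE];
	unsigned int num_eth_queues = 4, base_cl_id = 8, i;

	for (i = 0; i < EXAMPLE_IND_TABLE_SIZE; i++)
		ind_table[i] = base_cl_id + (i % num_eth_queues);

	for (i = 0; i < EXAMPLE_IND_TABLE_SIZE; i++)
		printf("%u ", ind_table[i]);	/* 8 9 10 11 8 9 10 11 ... */
	printf("\n");
	return 0;
}
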
1726
Dmitry Kravkov96305232012-04-03 18:41:30 +00001727int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001728 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001729{
Yuval Mintz3b603062012-03-18 10:33:39 +00001730 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001731 int i;
1732
1733 /* Although RSS is meaningless when there is a single HW queue we
1734 * still need it enabled in order to have HW Rx hash generated.
1735 *
1736 * if (!is_eth_multi(bp))
1737 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1738 */
1739
Dmitry Kravkov96305232012-04-03 18:41:30 +00001740 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001741
1742 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1743
Dmitry Kravkov96305232012-04-03 18:41:30 +00001744 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001745
Dmitry Kravkov96305232012-04-03 18:41:30 +00001746 /* RSS configuration */
1747 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1748 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1749 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1750 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001751 if (rss_obj->udp_rss_v4)
1752 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1753 if (rss_obj->udp_rss_v6)
1754 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001755
Dmitry Kravkov96305232012-04-03 18:41:30 +00001756 /* Hash bits */
1757 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001758
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001759 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001760
Dmitry Kravkov96305232012-04-03 18:41:30 +00001761 if (config_hash) {
1762 /* RSS keys */
1763 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1764 params.rss_key[i] = random32();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001765
Dmitry Kravkov96305232012-04-03 18:41:30 +00001766 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001767 }
1768
1769 return bnx2x_config_rss(bp, &params);
1770}
1771
Eric Dumazet1191cb82012-04-27 21:39:21 +00001772static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001773{
Yuval Mintz3b603062012-03-18 10:33:39 +00001774 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001775
1776 /* Prepare parameters for function state transitions */
1777 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1778
1779 func_params.f_obj = &bp->func_obj;
1780 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1781
1782 func_params.params.hw_init.load_phase = load_code;
1783
1784 return bnx2x_func_state_change(bp, &func_params);
1785}
1786
1787/*
 1788 * Cleans the objects that have internal lists without sending
 1789 * ramrods. Should be run when interrupts are disabled.
1790 */
1791static void bnx2x_squeeze_objects(struct bnx2x *bp)
1792{
1793 int rc;
1794 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00001795 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00001796 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001797
1798 /***************** Cleanup MACs' object first *************************/
1799
 1800	/* Wait for completion of the requested commands */
1801 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1802 /* Perform a dry cleanup */
1803 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1804
1805 /* Clean ETH primary MAC */
1806 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00001807 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001808 &ramrod_flags);
1809 if (rc != 0)
1810 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1811
1812 /* Cleanup UC list */
1813 vlan_mac_flags = 0;
1814 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1815 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1816 &ramrod_flags);
1817 if (rc != 0)
1818 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1819
1820 /***************** Now clean mcast object *****************************/
1821 rparam.mcast_obj = &bp->mcast_obj;
1822 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1823
1824 /* Add a DEL command... */
1825 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1826 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00001827 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1828 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001829
1830 /* ...and wait until all pending commands are cleared */
1831 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1832 while (rc != 0) {
1833 if (rc < 0) {
1834 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1835 rc);
1836 return;
1837 }
1838
1839 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1840 }
1841}
1842
1843#ifndef BNX2X_STOP_ON_ERROR
1844#define LOAD_ERROR_EXIT(bp, label) \
1845 do { \
1846 (bp)->state = BNX2X_STATE_ERROR; \
1847 goto label; \
1848 } while (0)
1849#else
1850#define LOAD_ERROR_EXIT(bp, label) \
1851 do { \
1852 (bp)->state = BNX2X_STATE_ERROR; \
1853 (bp)->panic = 1; \
1854 return -EBUSY; \
1855 } while (0)
1856#endif
1857
Yuval Mintz452427b2012-03-26 20:47:07 +00001858bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1859{
1860 /* build FW version dword */
1861 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1862 (BCM_5710_FW_MINOR_VERSION << 8) +
1863 (BCM_5710_FW_REVISION_VERSION << 16) +
1864 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1865
1866 /* read loaded FW from chip */
1867 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1868
1869 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1870
1871 if (loaded_fw != my_fw) {
1872 if (is_err)
1873 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1874 loaded_fw, my_fw);
1875 return false;
1876 }
1877
1878 return true;
1879}
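
/*
 * A small stand-alone illustration of the version-dword packing above: each
 * version component occupies one byte, least significant byte first, so
 * major 7, minor 2, revision 16, engineering 0 packs to 0x00100207.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t example_pack_fw_version(uint32_t major, uint32_t minor,
					uint32_t rev, uint32_t eng)
{
	return major + (minor << 8) + (rev << 16) + (eng << 24);
}

int main(void)
{
	printf("0x%08x\n", (unsigned int)example_pack_fw_version(7, 2, 16, 0));
	return 0;
}
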
1880
Eric Dumazet1191cb82012-04-27 21:39:21 +00001881/**
1882 * bnx2x_bz_fp - zero content of the fastpath structure.
1883 *
1884 * @bp: driver handle
1885 * @index: fastpath index to be zeroed
1886 *
1887 * Makes sure the contents of the bp->fp[index].napi is kept
1888 * intact.
1889 */
1890static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1891{
1892 struct bnx2x_fastpath *fp = &bp->fp[index];
Barak Witkowski15192a82012-06-19 07:48:28 +00001893 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1894
Merav Sicron65565882012-06-19 07:48:26 +00001895 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001896 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00001897 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001898 /* bzero bnx2x_fastpath contents */
Barak Witkowski15192a82012-06-19 07:48:28 +00001899 if (bp->stats_init) {
1900 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001901 memset(fp, 0, sizeof(*fp));
Barak Witkowski15192a82012-06-19 07:48:28 +00001902 } else {
Eric Dumazet1191cb82012-04-27 21:39:21 +00001903 /* Keep Queue statistics */
1904 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1905 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1906
1907 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1908 GFP_KERNEL);
1909 if (tmp_eth_q_stats)
Barak Witkowski15192a82012-06-19 07:48:28 +00001910 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001911 sizeof(struct bnx2x_eth_q_stats));
1912
1913 tmp_eth_q_stats_old =
1914 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1915 GFP_KERNEL);
1916 if (tmp_eth_q_stats_old)
Barak Witkowski15192a82012-06-19 07:48:28 +00001917 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001918 sizeof(struct bnx2x_eth_q_stats_old));
1919
Barak Witkowski15192a82012-06-19 07:48:28 +00001920 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001921 memset(fp, 0, sizeof(*fp));
1922
1923 if (tmp_eth_q_stats) {
Barak Witkowski15192a82012-06-19 07:48:28 +00001924 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
1925 sizeof(struct bnx2x_eth_q_stats));
Eric Dumazet1191cb82012-04-27 21:39:21 +00001926 kfree(tmp_eth_q_stats);
1927 }
1928
1929 if (tmp_eth_q_stats_old) {
Barak Witkowski15192a82012-06-19 07:48:28 +00001930 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00001931 sizeof(struct bnx2x_eth_q_stats_old));
1932 kfree(tmp_eth_q_stats_old);
1933 }
1934
1935 }
1936
1937 /* Restore the NAPI object as it has been already initialized */
1938 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00001939 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001940 fp->bp = bp;
1941 fp->index = index;
1942 if (IS_ETH_FP(fp))
1943 fp->max_cos = bp->max_cos;
1944 else
1945 /* Special queues support only one CoS */
1946 fp->max_cos = 1;
1947
Merav Sicron65565882012-06-19 07:48:26 +00001948 /* Init txdata pointers */
1949#ifdef BCM_CNIC
1950 if (IS_FCOE_FP(fp))
1951 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1952#endif
1953 if (IS_ETH_FP(fp))
1954 for_each_cos_in_tx_queue(fp, cos)
1955 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1956 BNX2X_NUM_ETH_QUEUES(bp) + index];
1957
Eric Dumazet1191cb82012-04-27 21:39:21 +00001958 /*
1959 * set the tpa flag for each queue. The tpa flag determines the queue
1960 * minimal size so it must be set prior to queue memory allocation
1961 */
1962 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1963 (bp->flags & GRO_ENABLE_FLAG &&
1964 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1965 if (bp->flags & TPA_ENABLE_FLAG)
1966 fp->mode = TPA_MODE_LRO;
1967 else if (bp->flags & GRO_ENABLE_FLAG)
1968 fp->mode = TPA_MODE_GRO;
1969
1970#ifdef BCM_CNIC
1971 /* We don't want TPA on an FCoE L2 ring */
1972 if (IS_FCOE_FP(fp))
1973 fp->disable_tpa = 1;
1974#endif
1975}
1976
1977
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001978/* must be called with rtnl_lock */
1979int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1980{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001981 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001982 u32 load_code;
1983 int i, rc;
1984
1985#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00001986 if (unlikely(bp->panic)) {
1987 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001988 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00001989 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001990#endif
1991
1992 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1993
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001994 /* Set the initial link reported state to link down */
1995 bnx2x_acquire_phy_lock(bp);
1996 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1997 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1998 &bp->last_reported_link.link_report_flags);
1999 bnx2x_release_phy_lock(bp);
2000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002001 /* must be called before memory allocation and HW init */
2002 bnx2x_ilt_set_info(bp);
2003
Ariel Elior6383c0b2011-07-14 08:31:57 +00002004 /*
2005 * Zero fastpath structures preserving invariants like napi, which are
2006 * allocated only once, fp index, max_cos, bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002007 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002008 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002009 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002010 for_each_queue(bp, i)
2011 bnx2x_bz_fp(bp, i);
Merav Sicron65565882012-06-19 07:48:26 +00002012 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
2013 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002014
Ariel Elior6383c0b2011-07-14 08:31:57 +00002015
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002016 /* Set the receive queues buffer size */
2017 bnx2x_set_rx_buf_size(bp);
2018
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002019 if (bnx2x_alloc_mem(bp))
2020 return -ENOMEM;
2021
 2022	/* Since bnx2x_alloc_mem() may update
2023 * bp->num_queues, bnx2x_set_real_num_queues() should always
2024 * come after it.
2025 */
2026 rc = bnx2x_set_real_num_queues(bp);
2027 if (rc) {
2028 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002029 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002030 }
2031
Ariel Elior6383c0b2011-07-14 08:31:57 +00002032 /* configure multi cos mappings in kernel.
 2033	 * this configuration may be overridden by a multi class queue discipline
2034 * or by a dcbx negotiation result.
2035 */
2036 bnx2x_setup_tc(bp->dev, bp->max_cos);
2037
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002038 bnx2x_napi_enable(bp);
2039
Ariel Elior889b9af2012-01-26 06:01:51 +00002040 /* set pf load just before approaching the MCP */
2041 bnx2x_set_pf_load(bp);
2042
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002043 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002044 * Returns the type of LOAD command:
 2045	 * if it is the first port to be initialized,
 2046	 * common blocks should be initialized; otherwise, not.
2047 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002048 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00002049 /* init fw_seq */
2050 bp->fw_seq =
2051 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2052 DRV_MSG_SEQ_NUMBER_MASK);
2053 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2054
2055 /* Get current FW pulse sequence */
2056 bp->fw_drv_pulse_wr_seq =
2057 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2058 DRV_PULSE_SEQ_MASK);
2059 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2060
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002061 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002062 if (!load_code) {
2063 BNX2X_ERR("MCP response failure, aborting\n");
2064 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002065 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002066 }
2067 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002068 BNX2X_ERR("Driver load refused\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002069 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002070 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002071 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00002072 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2073 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002074 /* abort nic load if version mismatch */
Yuval Mintz452427b2012-03-26 20:47:07 +00002075 if (!bnx2x_test_firmware_version(bp, true)) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002076 rc = -EBUSY;
2077 LOAD_ERROR_EXIT(bp, load_error2);
2078 }
2079 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002080
2081 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002082 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002083
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002084 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2085 path, load_count[path][0], load_count[path][1],
2086 load_count[path][2]);
2087 load_count[path][0]++;
2088 load_count[path][1 + port]++;
2089 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2090 path, load_count[path][0], load_count[path][1],
2091 load_count[path][2]);
2092 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002093 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002094 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002095 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2096 else
2097 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2098 }
2099
2100 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002101 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002102 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002103 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002104 /*
2105 * We need the barrier to ensure the ordering between the
2106 * writing to bp->port.pmf here and reading it from the
2107 * bnx2x_periodic_task().
2108 */
2109 smp_mb();
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002110 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002111 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002112
Merav Sicron51c1a582012-03-18 10:33:38 +00002113 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002114
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002115 /* Init Function state controlling object */
2116 bnx2x__init_func_obj(bp);
2117
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002118 /* Initialize HW */
2119 rc = bnx2x_init_hw(bp, load_code);
2120 if (rc) {
2121 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002122 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002123 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002124 }
2125
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002126 /* Connect to IRQs */
2127 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002128 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002129 BNX2X_ERR("IRQs setup failed\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002130 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002131 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002132 }
2133
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002134 /* Setup NIC internals and enable interrupts */
2135 bnx2x_nic_init(bp, load_code);
2136
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002137 /* Init per-function objects */
2138 bnx2x_init_bp_objs(bp);
2139
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002140 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2141 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002142 (bp->common.shmem2_base)) {
2143 if (SHMEM2_HAS(bp, dcc_support))
2144 SHMEM2_WR(bp, dcc_support,
2145 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2146 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
Barak Witkowskia3348722012-04-23 03:04:46 +00002147 if (SHMEM2_HAS(bp, afex_driver_support))
2148 SHMEM2_WR(bp, afex_driver_support,
2149 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002150 }
2151
Barak Witkowskia3348722012-04-23 03:04:46 +00002152 /* Set AFEX default VLAN tag to an invalid value */
2153 bp->afex_def_vlan_tag = -1;
2154
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002155 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2156 rc = bnx2x_func_start(bp);
2157 if (rc) {
2158 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00002159 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002160 LOAD_ERROR_EXIT(bp, load_error3);
2161 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002162
2163 /* Send LOAD_DONE command to MCP */
2164 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002165 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002166 if (!load_code) {
2167 BNX2X_ERR("MCP response failure, aborting\n");
2168 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002169 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002170 }
2171 }
2172
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002173 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002174 if (rc) {
2175 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002176 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002177 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002178
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002179#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002180 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002181 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002182#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002183
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002184 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002185 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Merav Sicron51c1a582012-03-18 10:33:38 +00002186 if (rc) {
2187 BNX2X_ERR("Queue setup failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002188 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002189 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002190 }
2191
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002192 rc = bnx2x_init_rss_pf(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002193 if (rc) {
2194 BNX2X_ERR("PF RSS init failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002195 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002196 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002197
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002198	/* Now that the clients are configured we are ready to work */
2199 bp->state = BNX2X_STATE_OPEN;
2200
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002201 /* Configure a ucast MAC */
2202 rc = bnx2x_set_eth_mac(bp, true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002203 if (rc) {
2204 BNX2X_ERR("Setting Ethernet MAC failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002205 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002206 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002207
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002208 if (bp->pending_max) {
2209 bnx2x_update_max_mf_config(bp, bp->pending_max);
2210 bp->pending_max = 0;
2211 }
2212
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002213 if (bp->port.pmf)
2214 bnx2x_initial_phy_init(bp, load_mode);
2215
2216 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002217
2218 /* Initialize Rx filter. */
2219 netif_addr_lock_bh(bp->dev);
2220 bnx2x_set_rx_mode(bp->dev);
2221 netif_addr_unlock_bh(bp->dev);
2222
2223 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002224 switch (load_mode) {
2225 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002226 /* Tx queue should be only reenabled */
2227 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002228 break;
2229
2230 case LOAD_OPEN:
2231 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002232 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002233 break;
2234
2235 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002236 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002237 bp->state = BNX2X_STATE_DIAG;
2238 break;
2239
2240 default:
2241 break;
2242 }
2243
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002244 if (bp->port.pmf)
Yuval Mintze695a2d2012-03-12 11:22:06 +00002245 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002246 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002247 bnx2x__link_status_update(bp);
2248
2249 /* start the timer */
2250 mod_timer(&bp->timer, jiffies + bp->current_interval);
2251
2252#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00002253 /* re-read iscsi info */
2254 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002255 bnx2x_setup_cnic_irq_info(bp);
Merav Sicron37ae41a2012-06-19 07:48:27 +00002256 bnx2x_setup_cnic_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002257 if (bp->state == BNX2X_STATE_OPEN)
2258 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2259#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002260
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002261 /* mark driver is loaded in shmem2 */
2262 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2263 u32 val;
2264 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2265 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2266 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2267 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2268 }
2269
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002270 /* Wait for all pending SP commands to complete */
2271 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2272 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2273 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2274 return -EBUSY;
2275 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002276
Merav Sicron8970b2e2012-06-19 07:48:22 +00002277 if (bp->state != BNX2X_STATE_DIAG)
2278 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002279 return 0;
2280
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002281#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002282load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002283#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002284 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002285 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002286#endif
2287load_error3:
2288 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002289
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002290 /* Clean queueable objects */
2291 bnx2x_squeeze_objects(bp);
2292
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002293 /* Free SKBs, SGEs, TPA pool and driver internals */
2294 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002295 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002296 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002297
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002298 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002299 bnx2x_free_irq(bp);
2300load_error2:
2301 if (!BP_NOMCP(bp)) {
2302 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2303 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2304 }
2305
2306 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002307load_error1:
2308 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002309 /* clear pf_load status, as it was already set */
2310 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002311load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002312 bnx2x_free_mem(bp);
2313
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002314 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002315#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002316}
2317
2318/* must be called with rtnl_lock */
2319int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2320{
2321 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002322 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002323
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002324 /* mark driver is unloaded in shmem2 */
2325 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2326 u32 val;
2327 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2328 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2329 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2330 }
2331
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002332 if ((bp->state == BNX2X_STATE_CLOSED) ||
2333 (bp->state == BNX2X_STATE_ERROR)) {
2334 /* We can get here if the driver has been unloaded
2335 * during parity error recovery and is either waiting for a
2336 * leader to complete or for other functions to unload and
2337 * then ifdown has been issued. In this case we want to
2338 * unload and let other functions to complete a recovery
 2339		 * unload and let other functions complete a recovery
2340 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002341 bp->recovery_state = BNX2X_RECOVERY_DONE;
2342 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002343 bnx2x_release_leader_lock(bp);
2344 smp_mb();
2345
Merav Sicron51c1a582012-03-18 10:33:38 +00002346 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2347 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002348 return -EINVAL;
2349 }
2350
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002351 /*
 2352	 * It's important to set the bp->state to a value different from
2353 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2354 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2355 */
2356 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2357 smp_mb();
2358
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002359 /* Stop Tx */
2360 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002361 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002362
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002363#ifdef BCM_CNIC
2364 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2365#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002366
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002367 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002368
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002369 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002370
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002371 /* Set ALWAYS_ALIVE bit in shmem */
2372 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2373
2374 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002375
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002376 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002377 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002378
2379 /* Cleanup the chip if needed */
2380 if (unload_mode != UNLOAD_RECOVERY)
2381 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002382 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002383 /* Send the UNLOAD_REQUEST to the MCP */
2384 bnx2x_send_unload_req(bp, unload_mode);
2385
2386 /*
2387 * Prevent transactions to host from the functions on the
2388 * engine that doesn't reset global blocks in case of global
 2389		 * attention once global blocks are reset and gates are opened
 2390		 * (the engine whose leader will perform the recovery
2391 * last).
2392 */
2393 if (!CHIP_IS_E1x(bp))
2394 bnx2x_pf_disable(bp);
2395
2396 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002397 bnx2x_netif_stop(bp, 1);
2398
2399 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002400 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002401
2402 /* Report UNLOAD_DONE to MCP */
2403 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002404 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002405
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002406 /*
 2407	 * At this stage no more interrupts will arrive so we may safely clean
2408 * the queueable objects here in case they failed to get cleaned so far.
2409 */
2410 bnx2x_squeeze_objects(bp);
2411
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002412 /* There should be no more pending SP commands at this stage */
2413 bp->sp_state = 0;
2414
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002415 bp->port.pmf = 0;
2416
2417 /* Free SKBs, SGEs, TPA pool and driver internals */
2418 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002419 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002420 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002421
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002422 bnx2x_free_mem(bp);
2423
2424 bp->state = BNX2X_STATE_CLOSED;
2425
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002426	/* Check if there are pending parity attentions. If there are, set
2427 * RECOVERY_IN_PROGRESS.
2428 */
2429 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2430 bnx2x_set_reset_in_progress(bp);
2431
2432 /* Set RESET_IS_GLOBAL if needed */
2433 if (global)
2434 bnx2x_set_reset_global(bp);
2435 }
2436
2437
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002438 /* The last driver must disable a "close the gate" if there is no
2439 * parity attention or "process kill" pending.
2440 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002441 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002442 bnx2x_disable_close_the_gate(bp);
2443
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002444 return 0;
2445}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002446
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002447int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2448{
2449 u16 pmcsr;
2450
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002451 /* If there is no power capability, silently succeed */
2452 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002453 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002454 return 0;
2455 }
2456
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002457 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2458
2459 switch (state) {
2460 case PCI_D0:
2461 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2462 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2463 PCI_PM_CTRL_PME_STATUS));
2464
2465 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2466 /* delay required during transition out of D3hot */
2467 msleep(20);
2468 break;
2469
2470 case PCI_D3hot:
 2471		/* If there are other clients above, don't
 2472		   shut down the power */
2473 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2474 return 0;
2475 /* Don't shut down the power for emulation and FPGA */
2476 if (CHIP_REV_IS_SLOW(bp))
2477 return 0;
2478
2479 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2480 pmcsr |= 3;
2481
2482 if (bp->wol)
2483 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2484
2485 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2486 pmcsr);
2487
2488 /* No more memory access after this point until
2489 * device is brought back to D0.
2490 */
2491 break;
2492
2493 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00002494 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002495 return -EINVAL;
2496 }
2497 return 0;
2498}
2499
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002500/*
2501 * net_device service functions
2502 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002503int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002504{
2505 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002506 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002507 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2508 napi);
2509 struct bnx2x *bp = fp->bp;
2510
2511 while (1) {
2512#ifdef BNX2X_STOP_ON_ERROR
2513 if (unlikely(bp->panic)) {
2514 napi_complete(napi);
2515 return 0;
2516 }
2517#endif
2518
Ariel Elior6383c0b2011-07-14 08:31:57 +00002519 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00002520 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2521 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002522
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002523
2524 if (bnx2x_has_rx_work(fp)) {
2525 work_done += bnx2x_rx_int(fp, budget - work_done);
2526
2527 /* must not complete if we consumed full budget */
2528 if (work_done >= budget)
2529 break;
2530 }
2531
2532 /* Fall out from the NAPI loop if needed */
2533 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002534#ifdef BCM_CNIC
2535 /* No need to update SB for FCoE L2 ring as long as
2536 * it's connected to the default SB and the SB
2537 * has been updated when NAPI was scheduled.
2538 */
2539 if (IS_FCOE_FP(fp)) {
2540 napi_complete(napi);
2541 break;
2542 }
2543#endif
2544
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002545 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002546 /* bnx2x_has_rx_work() reads the status block,
2547 * thus we need to ensure that status block indices
2548 * have been actually read (bnx2x_update_fpsb_idx)
2549 * prior to this check (bnx2x_has_rx_work) so that
2550 * we won't write the "newer" value of the status block
2551 * to IGU (if there was a DMA right after
2552 * bnx2x_has_rx_work and if there is no rmb, the memory
2553 * reading (bnx2x_update_fpsb_idx) may be postponed
2554 * to right before bnx2x_ack_sb). In this case there
2555 * will never be another interrupt until there is
2556 * another update of the status block, while there
2557 * is still unhandled work.
2558 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002559 rmb();
2560
2561 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2562 napi_complete(napi);
2563 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00002564 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002565 "Update index to %d\n", fp->fp_hc_idx);
2566 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2567 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002568 IGU_INT_ENABLE, 1);
2569 break;
2570 }
2571 }
2572 }
2573
2574 return work_done;
2575}
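
/*
 * A minimal sketch (not bnx2x code) of the generic NAPI poll contract that
 * the loop above implements in a more elaborate way: process at most
 * "budget" packets and only call napi_complete() and re-enable device
 * interrupts once less than the full budget was consumed.  example_priv,
 * example_rx_process() and example_irq_enable() are hypothetical stand-ins
 * for driver-specific pieces; bnx2x additionally re-reads its status block
 * indices before re-arming the IGU.
 */
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static int example_rx_process(struct example_priv *priv, int limit)
{
	/* hypothetical: process up to "limit" packets, return how many */
	return 0;
}

static void example_irq_enable(struct example_priv *priv)
{
	/* hypothetical: re-arm the device's RX interrupt */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv,
						 napi);
	int work_done = example_rx_process(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		example_irq_enable(priv);
	}
	return work_done;
}
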
2576
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002577/* we split the first BD into headers and data BDs
 2578 * to ease the pain of our fellow microcode engineers;
 2579 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002580 */
2581static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002582 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002583 struct sw_tx_bd *tx_buf,
2584 struct eth_tx_start_bd **tx_bd, u16 hlen,
2585 u16 bd_prod, int nbd)
2586{
2587 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2588 struct eth_tx_bd *d_tx_bd;
2589 dma_addr_t mapping;
2590 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2591
2592 /* first fix first BD */
2593 h_tx_bd->nbd = cpu_to_le16(nbd);
2594 h_tx_bd->nbytes = cpu_to_le16(hlen);
2595
Merav Sicron51c1a582012-03-18 10:33:38 +00002596 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2597 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002598
2599 /* now get a new data BD
2600 * (after the pbd) and fill it */
2601 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002602 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002603
2604 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2605 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2606
2607 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2608 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2609 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2610
2611 /* this marks the BD as one that has no individual mapping */
2612 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2613
2614 DP(NETIF_MSG_TX_QUEUED,
2615 "TSO split data size is %d (%x:%x)\n",
2616 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2617
2618 /* update tx_bd */
2619 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2620
2621 return bd_prod;
2622}
2623
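/* Fix up a partial checksum that was computed from the wrong starting
 * offset: for a positive fix, subtract the checksum of the "fix" bytes
 * that precede t_header; for a negative fix, add back the checksum of
 * the bytes that were skipped.  The result is folded and byte-swapped
 * into the form the parse BD expects.
 */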
2624static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2625{
2626 if (fix > 0)
2627 csum = (u16) ~csum_fold(csum_sub(csum,
2628 csum_partial(t_header - fix, fix, 0)));
2629
2630 else if (fix < 0)
2631 csum = (u16) ~csum_fold(csum_add(csum,
2632 csum_partial(t_header, -fix, 0)));
2633
2634 return swab16(csum);
2635}
2636
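/* Build a bitmask of XMIT_* flags describing the offload work needed
 * for this skb: plain, IPv4/IPv6 checksum, TCP checksum and/or GSO.
 * The mask steers the start/parse BD setup in bnx2x_start_xmit().
 */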
2637static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2638{
2639 u32 rc;
2640
2641 if (skb->ip_summed != CHECKSUM_PARTIAL)
2642 rc = XMIT_PLAIN;
2643
2644 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002645 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002646 rc = XMIT_CSUM_V6;
2647 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2648 rc |= XMIT_CSUM_TCP;
2649
2650 } else {
2651 rc = XMIT_CSUM_V4;
2652 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2653 rc |= XMIT_CSUM_TCP;
2654 }
2655 }
2656
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002657 if (skb_is_gso_v6(skb))
2658 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2659 else if (skb_is_gso(skb))
2660 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002661
2662 return rc;
2663}
2664
2665#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2666/* check if packet requires linearization (packet is too fragmented)
2667 no need to check fragmentation if page size > 8K (there will be no
 2668   violation of FW restrictions) */
2669static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2670 u32 xmit_type)
2671{
2672 int to_copy = 0;
2673 int hlen = 0;
2674 int first_bd_sz = 0;
2675
2676 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2677 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2678
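		/* FW restriction, as checked below: every window of
		 * wnd_size consecutive BDs of an LSO packet must carry at
		 * least gso_size bytes, otherwise the skb has to be copied
		 * (linearized) before transmission.
		 */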
2679 if (xmit_type & XMIT_GSO) {
2680 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2681 /* Check if LSO packet needs to be copied:
2682 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2683 int wnd_size = MAX_FETCH_BD - 3;
2684 /* Number of windows to check */
2685 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2686 int wnd_idx = 0;
2687 int frag_idx = 0;
2688 u32 wnd_sum = 0;
2689
2690 /* Headers length */
2691 hlen = (int)(skb_transport_header(skb) - skb->data) +
2692 tcp_hdrlen(skb);
2693
 2694			/* Amount of data (w/o headers) on linear part of SKB */
2695 first_bd_sz = skb_headlen(skb) - hlen;
2696
2697 wnd_sum = first_bd_sz;
2698
2699 /* Calculate the first sum - it's special */
2700 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2701 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002702 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002703
2704 /* If there was data on linear skb data - check it */
2705 if (first_bd_sz > 0) {
2706 if (unlikely(wnd_sum < lso_mss)) {
2707 to_copy = 1;
2708 goto exit_lbl;
2709 }
2710
2711 wnd_sum -= first_bd_sz;
2712 }
2713
2714 /* Others are easier: run through the frag list and
2715 check all windows */
2716 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2717 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002718 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002719
2720 if (unlikely(wnd_sum < lso_mss)) {
2721 to_copy = 1;
2722 break;
2723 }
2724 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002725 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002726 }
2727 } else {
 2728			/* in the non-LSO case a too fragmented packet should
 2729			   always be linearized */
2730 to_copy = 1;
2731 }
2732 }
2733
2734exit_lbl:
2735 if (unlikely(to_copy))
2736 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00002737 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002738 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2739 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2740
2741 return to_copy;
2742}
2743#endif
2744
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002745static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2746 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002747{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002748 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2749 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2750 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002751 if ((xmit_type & XMIT_GSO_V6) &&
2752 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002753 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002754}
2755
2756/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002757 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002758 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002759 * @skb: packet skb
2760 * @pbd: parse BD
2761 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002762 */
2763static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2764 struct eth_tx_parse_bd_e1x *pbd,
2765 u32 xmit_type)
2766{
2767 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2768 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2769 pbd->tcp_flags = pbd_tcp_flags(skb);
2770
2771 if (xmit_type & XMIT_GSO_V4) {
2772 pbd->ip_id = swab16(ip_hdr(skb)->id);
2773 pbd->tcp_pseudo_csum =
2774 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2775 ip_hdr(skb)->daddr,
2776 0, IPPROTO_TCP, 0));
2777
2778 } else
2779 pbd->tcp_pseudo_csum =
2780 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2781 &ipv6_hdr(skb)->daddr,
2782 0, IPPROTO_TCP, 0));
2783
2784 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2785}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002786
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002787/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002788 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002789 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002790 * @bp: driver handle
2791 * @skb: packet skb
2792 * @parsing_data: data to be updated
2793 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002794 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002795 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002796 */
2797static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002798 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002799{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002800 *parsing_data |=
2801 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2802 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2803 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002804
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002805 if (xmit_type & XMIT_CSUM_TCP) {
2806 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2807 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2808 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002809
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002810 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2811 } else
2812 /* We support checksum offload for TCP and UDP only.
2813 * No need to pass the UDP header length - it's a constant.
2814 */
2815 return skb_transport_header(skb) +
2816 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002817}
2818
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002819static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2820 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2821{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002822 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2823
2824 if (xmit_type & XMIT_CSUM_V4)
2825 tx_start_bd->bd_flags.as_bitfield |=
2826 ETH_TX_BD_FLAGS_IP_CSUM;
2827 else
2828 tx_start_bd->bd_flags.as_bitfield |=
2829 ETH_TX_BD_FLAGS_IPV6;
2830
2831 if (!(xmit_type & XMIT_CSUM_TCP))
2832 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002833}
2834
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002835/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002836 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002837 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002838 * @bp: driver handle
2839 * @skb: packet skb
2840 * @pbd: parse BD to be updated
2841 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002842 */
2843static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2844 struct eth_tx_parse_bd_e1x *pbd,
2845 u32 xmit_type)
2846{
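	/* The E1x parse BD keeps header lengths in 16-bit words, hence the
	 * ">> 1" and "/ 2" below; the byte count returned to the caller is
	 * restored with "hlen * 2".
	 */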
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002847 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002848
2849 /* for now NS flag is not used in Linux */
2850 pbd->global_data =
2851 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2852 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2853
2854 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002855 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002856
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002857 hlen += pbd->ip_hlen_w;
2858
2859 /* We support checksum offload for TCP and UDP only */
2860 if (xmit_type & XMIT_CSUM_TCP)
2861 hlen += tcp_hdrlen(skb) / 2;
2862 else
2863 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002864
2865 pbd->total_hlen_w = cpu_to_le16(hlen);
2866 hlen = hlen*2;
2867
2868 if (xmit_type & XMIT_CSUM_TCP) {
2869 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2870
2871 } else {
2872 s8 fix = SKB_CS_OFF(skb); /* signed! */
2873
2874 DP(NETIF_MSG_TX_QUEUED,
2875 "hlen %d fix %d csum before fix %x\n",
2876 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2877
2878 /* HW bug: fixup the CSUM */
2879 pbd->tcp_pseudo_csum =
2880 bnx2x_csum_fix(skb_transport_header(skb),
2881 SKB_CS(skb), fix);
2882
2883 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2884 pbd->tcp_pseudo_csum);
2885 }
2886
2887 return hlen;
2888}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002889
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002890/* called with netif_tx_lock
2891 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2892 * netif_wake_queue()
2893 */
2894netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2895{
2896 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002897
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002898 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002899 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002900 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002901 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002902 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002903 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002904 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002905 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002906 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00002907 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002908 dma_addr_t mapping;
2909 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2910 int i;
2911 u8 hlen = 0;
2912 __le16 pkt_size = 0;
2913 struct ethhdr *eth;
2914 u8 mac_type = UNICAST_ADDRESS;
2915
2916#ifdef BNX2X_STOP_ON_ERROR
2917 if (unlikely(bp->panic))
2918 return NETDEV_TX_BUSY;
2919#endif
2920
Ariel Elior6383c0b2011-07-14 08:31:57 +00002921 txq_index = skb_get_queue_mapping(skb);
2922 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002923
Ariel Elior6383c0b2011-07-14 08:31:57 +00002924 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2925
Merav Sicron65565882012-06-19 07:48:26 +00002926 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002927
2928 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00002929 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002930 txq_index, fp_index, txdata_index); */
2931
Ariel Elior6383c0b2011-07-14 08:31:57 +00002932	/* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00002933 DP(NETIF_MSG_TX_QUEUED,
2934 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002935 txdata->cid, fp_index, txdata_index, txdata, fp); */
2936
2937 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2938 (skb_shinfo(skb)->nr_frags + 3))) {
Barak Witkowski15192a82012-06-19 07:48:28 +00002939 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002940 netif_tx_stop_queue(txq);
2941 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2942 return NETDEV_TX_BUSY;
2943 }
2944
Merav Sicron51c1a582012-03-18 10:33:38 +00002945 DP(NETIF_MSG_TX_QUEUED,
2946 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002947 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002948 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2949
2950 eth = (struct ethhdr *)skb->data;
2951
2952 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2953 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2954 if (is_broadcast_ether_addr(eth->h_dest))
2955 mac_type = BROADCAST_ADDRESS;
2956 else
2957 mac_type = MULTICAST_ADDRESS;
2958 }
2959
2960#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2961 /* First, check if we need to linearize the skb (due to FW
2962 restrictions). No need to check fragmentation if page size > 8K
 2963	   (there will be no violation of FW restrictions) */
2964 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2965 /* Statistics of linearization */
2966 bp->lin_cnt++;
2967 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002968 DP(NETIF_MSG_TX_QUEUED,
2969 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002970 dev_kfree_skb_any(skb);
2971 return NETDEV_TX_OK;
2972 }
2973 }
2974#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002975 /* Map skb linear data for DMA */
2976 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2977 skb_headlen(skb), DMA_TO_DEVICE);
2978 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002979 DP(NETIF_MSG_TX_QUEUED,
2980 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002981 dev_kfree_skb_any(skb);
2982 return NETDEV_TX_OK;
2983 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002984 /*
2985 Please read carefully. First we use one BD which we mark as start,
2986 then we have a parsing info BD (used for TSO or xsum),
2987 and only then we have the rest of the TSO BDs.
2988 (don't forget to mark the last one as last,
2989 and to unmap only AFTER you write to the BD ...)
 2990	   And above all, all pbd sizes are in words - NOT DWORDS!
2991 */
2992
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002993 /* get current pkt produced now - advance it just before sending packet
2994 * since mapping of pages may fail and cause packet to be dropped
2995 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002996 pkt_prod = txdata->tx_pkt_prod;
2997 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002998
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002999 /* get a tx_buf and first BD
3000 * tx_start_bd may be changed during SPLIT,
3001 * but first_bd will always stay first
3002 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003003 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3004 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003005 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003006
3007 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003008 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
3009 mac_type);
3010
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003011 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003012 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003013
3014 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003015 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003016 tx_buf->skb = skb;
3017 tx_buf->flags = 0;
3018
3019 DP(NETIF_MSG_TX_QUEUED,
3020 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003021 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003022
Jesse Grosseab6d182010-10-20 13:56:03 +00003023 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003024 tx_start_bd->vlan_or_ethertype =
3025 cpu_to_le16(vlan_tx_tag_get(skb));
3026 tx_start_bd->bd_flags.as_bitfield |=
3027 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003028 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003029 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003030
3031 /* turn on parsing and get a BD */
3032 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003033
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003034 if (xmit_type & XMIT_CSUM)
3035 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003036
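	/* From here the flow forks by chip family: on 57712 and newer
	 * (!CHIP_IS_E1x) checksum/GSO parameters are accumulated in
	 * pbd_e2_parsing_data and written to the E2 parse BD in one go,
	 * while E1x chips get their parse BD fields filled in directly.
	 */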
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003037 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003038 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003039 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3040 /* Set PBD in checksum offload case */
3041 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003042 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3043 &pbd_e2_parsing_data,
3044 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003045 if (IS_MF_SI(bp)) {
3046 /*
3047 * fill in the MAC addresses in the PBD - for local
3048 * switching
3049 */
3050 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3051 &pbd_e2->src_mac_addr_mid,
3052 &pbd_e2->src_mac_addr_lo,
3053 eth->h_source);
3054 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3055 &pbd_e2->dst_mac_addr_mid,
3056 &pbd_e2->dst_mac_addr_lo,
3057 eth->h_dest);
3058 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003059 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003060 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003061 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3062 /* Set PBD in checksum offload case */
3063 if (xmit_type & XMIT_CSUM)
3064 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003065
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003066 }
3067
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003068 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003069 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3070 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003071 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003072 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3073 pkt_size = tx_start_bd->nbytes;
3074
Merav Sicron51c1a582012-03-18 10:33:38 +00003075 DP(NETIF_MSG_TX_QUEUED,
3076 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003077 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3078 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003079 tx_start_bd->bd_flags.as_bitfield,
3080 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003081
3082 if (xmit_type & XMIT_GSO) {
3083
3084 DP(NETIF_MSG_TX_QUEUED,
3085 "TSO packet len %d hlen %d total len %d tso size %d\n",
3086 skb->len, hlen, skb_headlen(skb),
3087 skb_shinfo(skb)->gso_size);
3088
3089 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3090
3091 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003092 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3093 &tx_start_bd, hlen,
3094 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003095 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003096 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3097 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003098 else
3099 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003100 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003101
3102 /* Set the PBD's parsing_data field if not zero
3103 * (for the chips newer than 57711).
3104 */
3105 if (pbd_e2_parsing_data)
3106 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3107
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003108 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3109
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003110 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003111 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3112 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3113
Eric Dumazet9e903e02011-10-18 21:00:24 +00003114 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3115 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003116 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003117 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003118
Merav Sicron51c1a582012-03-18 10:33:38 +00003119 DP(NETIF_MSG_TX_QUEUED,
3120 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003121
 3122			/* we need to unmap all buffers already mapped
 3123			 * for this SKB;
 3124			 * first_bd->nbd needs to be properly updated
 3125			 * before the call to bnx2x_free_tx_pkt
3126 */
3127 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003128 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003129 TX_BD(txdata->tx_pkt_prod),
3130 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003131 return NETDEV_TX_OK;
3132 }
3133
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003134 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003135 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003136 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003137 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003138
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003139 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3140 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003141 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3142 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003143 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003144
3145 DP(NETIF_MSG_TX_QUEUED,
3146 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3147 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3148 le16_to_cpu(tx_data_bd->nbytes));
3149 }
3150
3151 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3152
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003153 /* update with actual num BDs */
3154 first_bd->nbd = cpu_to_le16(nbd);
3155
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003156 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3157
3158 /* now send a tx doorbell, counting the next BD
3159 * if the packet contains or ends with it
3160 */
3161 if (TX_BD_POFF(bd_prod) < nbd)
3162 nbd++;
3163
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003164 /* total_pkt_bytes should be set on the first data BD if
3165 * it's not an LSO packet and there is more than one
3166 * data BD. In this case pkt_size is limited by an MTU value.
3167 * However we prefer to set it for an LSO packet (while we don't
 3168	 * have to) in order to save some CPU cycles in the non-LSO
 3169	 * case, which we care much more about.
3170 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003171 if (total_pkt_bd != NULL)
3172 total_pkt_bd->total_pkt_bytes = pkt_size;
3173
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003174 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003175 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003176 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003177 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3178 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3179 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3180 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003181 if (pbd_e2)
3182 DP(NETIF_MSG_TX_QUEUED,
3183 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3184 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3185 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3186 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3187 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003188 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3189
Tom Herbert2df1a702011-11-28 16:33:37 +00003190 netdev_tx_sent_queue(txq, skb->len);
3191
Willem de Bruijn8373c572012-04-27 09:04:06 +00003192 skb_tx_timestamp(skb);
3193
Ariel Elior6383c0b2011-07-14 08:31:57 +00003194 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003195 /*
3196 * Make sure that the BD data is updated before updating the producer
3197 * since FW might read the BD right after the producer is updated.
3198 * This is only applicable for weak-ordered memory model archs such
 3199	 * as IA-64. The following barrier is also mandatory since the FW
 3200	 * assumes packets always have BDs.
3201 */
3202 wmb();
3203
Ariel Elior6383c0b2011-07-14 08:31:57 +00003204 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003205 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003206
Ariel Elior6383c0b2011-07-14 08:31:57 +00003207 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003208
3209 mmiowb();
3210
Ariel Elior6383c0b2011-07-14 08:31:57 +00003211 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003212
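	/* Stop the queue once fewer than MAX_SKB_FRAGS + 4 BDs remain
	 * (roughly the worst case a single skb can consume), then re-check
	 * after the barrier so a completion that ran in bnx2x_tx_int()
	 * meanwhile does not leave the queue stopped forever.
	 */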
Eric Dumazetbc147862012-06-13 09:45:16 +00003213 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003214 netif_tx_stop_queue(txq);
3215
3216 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3217 * ordering of set_bit() in netif_tx_stop_queue() and read of
3218 * fp->bd_tx_cons */
3219 smp_mb();
3220
Barak Witkowski15192a82012-06-19 07:48:28 +00003221 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Eric Dumazetbc147862012-06-13 09:45:16 +00003222 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003223 netif_tx_wake_queue(txq);
3224 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003225 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003226
3227 return NETDEV_TX_OK;
3228}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003229
Ariel Elior6383c0b2011-07-14 08:31:57 +00003230/**
3231 * bnx2x_setup_tc - routine to configure net_device for multi tc
3232 *
3233 * @netdev: net device to configure
3234 * @tc: number of traffic classes to enable
3235 *
3236 * callback connected to the ndo_setup_tc function pointer
3237 */
3238int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3239{
3240 int cos, prio, count, offset;
3241 struct bnx2x *bp = netdev_priv(dev);
3242
3243 /* setup tc must be called under rtnl lock */
3244 ASSERT_RTNL();
3245
3246 /* no traffic classes requested. aborting */
3247 if (!num_tc) {
3248 netdev_reset_tc(dev);
3249 return 0;
3250 }
3251
3252 /* requested to support too many traffic classes */
3253 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003254 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3255 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003256 return -EINVAL;
3257 }
3258
3259 /* declare amount of supported traffic classes */
3260 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003261 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003262 return -EINVAL;
3263 }
3264
3265 /* configure priority to traffic class mapping */
3266 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3267 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003268 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3269 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003270 prio, bp->prio_to_cos[prio]);
3271 }
3272
3273
 3274	/* Use this configuration to differentiate tc0 from other COSes.
 3275	   This can be used for ETS or PFC, and saves the effort of setting
 3276	   up a multi-class queueing discipline or negotiating DCBX with a switch
3277 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003278 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003279 for (prio = 1; prio < 16; prio++) {
3280 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003281 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003282 } */
3283
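	/* Each traffic class is backed by its own block of ETH tx queues:
	 * class "cos" starts at queue cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)
	 * and spans BNX2X_NUM_ETH_QUEUES(bp) entries, matching the loop
	 * below.
	 */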
3284 /* configure traffic class to transmission queue mapping */
3285 for (cos = 0; cos < bp->max_cos; cos++) {
3286 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003287 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003288 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003289 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3290 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003291 cos, offset, count);
3292 }
3293
3294 return 0;
3295}
3296
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003297/* called with rtnl_lock */
3298int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3299{
3300 struct sockaddr *addr = p;
3301 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003302 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003303
Merav Sicron51c1a582012-03-18 10:33:38 +00003304 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3305 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003306 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003307 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003308
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003309#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003310 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3311 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003312 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003313 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003314 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003315#endif
3316
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003317 if (netif_running(dev)) {
3318 rc = bnx2x_set_eth_mac(bp, false);
3319 if (rc)
3320 return rc;
3321 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003322
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003323 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003324 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3325
3326 if (netif_running(dev))
3327 rc = bnx2x_set_eth_mac(bp, true);
3328
3329 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003330}
3331
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003332static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3333{
3334 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3335 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003336 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003337
3338 /* Common */
3339#ifdef BCM_CNIC
3340 if (IS_FCOE_IDX(fp_index)) {
3341 memset(sb, 0, sizeof(union host_hc_status_block));
3342 fp->status_blk_mapping = 0;
3343
3344 } else {
3345#endif
3346 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003347 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003348 BNX2X_PCI_FREE(sb->e2_sb,
3349 bnx2x_fp(bp, fp_index,
3350 status_blk_mapping),
3351 sizeof(struct host_hc_status_block_e2));
3352 else
3353 BNX2X_PCI_FREE(sb->e1x_sb,
3354 bnx2x_fp(bp, fp_index,
3355 status_blk_mapping),
3356 sizeof(struct host_hc_status_block_e1x));
3357#ifdef BCM_CNIC
3358 }
3359#endif
3360 /* Rx */
3361 if (!skip_rx_queue(bp, fp_index)) {
3362 bnx2x_free_rx_bds(fp);
3363
3364 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3365 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3366 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3367 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3368 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3369
3370 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3371 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3372 sizeof(struct eth_fast_path_rx_cqe) *
3373 NUM_RCQ_BD);
3374
3375 /* SGE ring */
3376 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3377 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3378 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3379 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3380 }
3381
3382 /* Tx */
3383 if (!skip_tx_queue(bp, fp_index)) {
3384 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003385 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003387
Merav Sicron51c1a582012-03-18 10:33:38 +00003388 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003389 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003390 fp_index, cos, txdata->cid);
3391
3392 BNX2X_FREE(txdata->tx_buf_ring);
3393 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3394 txdata->tx_desc_mapping,
3395 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3396 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003397 }
3398 /* end of fastpath */
3399}
3400
3401void bnx2x_free_fp_mem(struct bnx2x *bp)
3402{
3403 int i;
3404 for_each_queue(bp, i)
3405 bnx2x_free_fp_mem_at(bp, i);
3406}
3407
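/* Set the fastpath shortcuts to the index_values/running_index arrays
 * inside the status block, so later code can use one pointer regardless
 * of the chip-specific (E2 vs E1x) layout.
 */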
Eric Dumazet1191cb82012-04-27 21:39:21 +00003408static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003409{
3410 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003411 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003412 bnx2x_fp(bp, index, sb_index_values) =
3413 (__le16 *)status_blk.e2_sb->sb.index_values;
3414 bnx2x_fp(bp, index, sb_running_index) =
3415 (__le16 *)status_blk.e2_sb->sb.running_index;
3416 } else {
3417 bnx2x_fp(bp, index, sb_index_values) =
3418 (__le16 *)status_blk.e1x_sb->sb.index_values;
3419 bnx2x_fp(bp, index, sb_running_index) =
3420 (__le16 *)status_blk.e1x_sb->sb.running_index;
3421 }
3422}
3423
Eric Dumazet1191cb82012-04-27 21:39:21 +00003424/* Returns the number of actually allocated BDs */
3425static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3426 int rx_ring_size)
3427{
3428 struct bnx2x *bp = fp->bp;
3429 u16 ring_prod, cqe_ring_prod;
3430 int i, failure_cnt = 0;
3431
3432 fp->rx_comp_cons = 0;
3433 cqe_ring_prod = ring_prod = 0;
3434
 3435	/* This routine is called only during fp init so
3436 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3437 */
3438 for (i = 0; i < rx_ring_size; i++) {
3439 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3440 failure_cnt++;
3441 continue;
3442 }
3443 ring_prod = NEXT_RX_IDX(ring_prod);
3444 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3445 WARN_ON(ring_prod <= (i - failure_cnt));
3446 }
3447
3448 if (failure_cnt)
3449 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3450 i - failure_cnt, fp->index);
3451
3452 fp->rx_bd_prod = ring_prod;
3453 /* Limit the CQE producer by the CQE ring size */
3454 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3455 cqe_ring_prod);
3456 fp->rx_pkt = fp->rx_calls = 0;
3457
Barak Witkowski15192a82012-06-19 07:48:28 +00003458 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00003459
3460 return i - failure_cnt;
3461}
3462
3463static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3464{
3465 int i;
3466
3467 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3468 struct eth_rx_cqe_next_page *nextpg;
3469
3470 nextpg = (struct eth_rx_cqe_next_page *)
3471 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3472 nextpg->addr_hi =
3473 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3474 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3475 nextpg->addr_lo =
3476 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3477 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3478 }
3479}
3480
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003481static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3482{
3483 union host_hc_status_block *sb;
3484 struct bnx2x_fastpath *fp = &bp->fp[index];
3485 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003486 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003487 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003488
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003489#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003490 if (!bp->rx_ring_size &&
3491 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003492 rx_ring_size = MIN_RX_SIZE_NONTPA;
3493 bp->rx_ring_size = rx_ring_size;
3494 } else
3495#endif
David S. Miller8decf862011-09-22 03:23:13 -04003496 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003497 u32 cfg = SHMEM_RD(bp,
3498 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003499
David S. Miller8decf862011-09-22 03:23:13 -04003500 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3501
Mintz Yuvald760fc32012-02-15 02:10:28 +00003502		/* Decrease ring size for 1G functions */
3503 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3504 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3505 rx_ring_size /= 10;
3506
David S. Miller8decf862011-09-22 03:23:13 -04003507 /* allocate at least number of buffers required by FW */
3508 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3509 MIN_RX_SIZE_TPA, rx_ring_size);
3510
3511 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003512 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003513 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003514
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003515 /* Common */
3516 sb = &bnx2x_fp(bp, index, status_blk);
3517#ifdef BCM_CNIC
3518 if (!IS_FCOE_IDX(index)) {
3519#endif
3520 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003521 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003522 BNX2X_PCI_ALLOC(sb->e2_sb,
3523 &bnx2x_fp(bp, index, status_blk_mapping),
3524 sizeof(struct host_hc_status_block_e2));
3525 else
3526 BNX2X_PCI_ALLOC(sb->e1x_sb,
3527 &bnx2x_fp(bp, index, status_blk_mapping),
3528 sizeof(struct host_hc_status_block_e1x));
3529#ifdef BCM_CNIC
3530 }
3531#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003532
3533 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3534 * set shortcuts for it.
3535 */
3536 if (!IS_FCOE_IDX(index))
3537 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003538
3539 /* Tx */
3540 if (!skip_tx_queue(bp, index)) {
3541 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003542 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003543 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003544
Merav Sicron51c1a582012-03-18 10:33:38 +00003545 DP(NETIF_MSG_IFUP,
3546 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003547 index, cos);
3548
3549 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003550 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003551 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3552 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003553 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003554 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003555 }
3556
3557 /* Rx */
3558 if (!skip_rx_queue(bp, index)) {
3559 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3560 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3561 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3562 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3563 &bnx2x_fp(bp, index, rx_desc_mapping),
3564 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3565
3566 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3567 &bnx2x_fp(bp, index, rx_comp_mapping),
3568 sizeof(struct eth_fast_path_rx_cqe) *
3569 NUM_RCQ_BD);
3570
3571 /* SGE ring */
3572 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3573 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3574 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3575 &bnx2x_fp(bp, index, rx_sge_mapping),
3576 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3577 /* RX BD ring */
3578 bnx2x_set_next_page_rx_bd(fp);
3579
3580 /* CQ ring */
3581 bnx2x_set_next_page_rx_cq(fp);
3582
3583 /* BDs */
3584 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3585 if (ring_size < rx_ring_size)
3586 goto alloc_mem_err;
3587 }
3588
3589 return 0;
3590
3591/* handles low memory cases */
3592alloc_mem_err:
3593 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3594 index, ring_size);
 3595	/* FW will drop all packets if the queue is not big enough,
 3596	 * so in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003597 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003598 */
3599 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003600 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003601 /* release memory allocated for this queue */
3602 bnx2x_free_fp_mem_at(bp, index);
3603 return -ENOMEM;
3604 }
3605 return 0;
3606}
3607
3608int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3609{
3610 int i;
3611
3612 /**
3613 * 1. Allocate FP for leading - fatal if error
3614 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003615 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3616 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003617 */
3618
3619 /* leading */
3620 if (bnx2x_alloc_fp_mem_at(bp, 0))
3621 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003622
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003623#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003624 if (!NO_FCOE(bp))
3625 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00003626 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003627 /* we will fail load process instead of mark
3628 * NO_FCOE_FLAG
3629 */
3630 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003631#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003632
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003633 /* RSS */
3634 for_each_nondefault_eth_queue(bp, i)
3635 if (bnx2x_alloc_fp_mem_at(bp, i))
3636 break;
3637
3638 /* handle memory failures */
3639 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3640 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3641
3642 WARN_ON(delta < 0);
3643#ifdef BCM_CNIC
3644 /**
3645 * move non eth FPs next to last eth FP
3646 * must be done in that order
3647 * FCOE_IDX < FWD_IDX < OOO_IDX
3648 */
3649
Ariel Elior6383c0b2011-07-14 08:31:57 +00003650		/* move FCoE fp even if NO_FCOE_FLAG is on */
Merav Sicron65565882012-06-19 07:48:26 +00003651 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003652#endif
3653 bp->num_queues -= delta;
3654 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3655 bp->num_queues + delta, bp->num_queues);
3656 }
3657
3658 return 0;
3659}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003660
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003661void bnx2x_free_mem_bp(struct bnx2x *bp)
3662{
Barak Witkowski15192a82012-06-19 07:48:28 +00003663 kfree(bp->fp->tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003664 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00003665 kfree(bp->sp_objs);
3666 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00003667 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003668 kfree(bp->msix_table);
3669 kfree(bp->ilt);
3670}
3671
3672int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3673{
3674 struct bnx2x_fastpath *fp;
3675 struct msix_entry *tbl;
3676 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003677 int msix_table_size = 0;
Barak Witkowski15192a82012-06-19 07:48:28 +00003678 int fp_array_size;
3679 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003680
Ariel Elior6383c0b2011-07-14 08:31:57 +00003681 /*
 3682	 * The biggest MSI-X table we might need is the maximum number of fast
3683 * path IGU SBs plus default SB (for PF).
3684 */
3685 msix_table_size = bp->igu_sb_cnt + 1;
3686
3687 /* fp array: RSS plus CNIC related L2 queues */
Barak Witkowski15192a82012-06-19 07:48:28 +00003688 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
3689 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3690
3691 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003692 if (!fp)
3693 goto alloc_err;
Barak Witkowski15192a82012-06-19 07:48:28 +00003694 for (i = 0; i < fp_array_size; i++) {
3695 fp[i].tpa_info =
3696 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3697 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3698 if (!(fp[i].tpa_info))
3699 goto alloc_err;
3700 }
3701
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003702 bp->fp = fp;
3703
Barak Witkowski15192a82012-06-19 07:48:28 +00003704 /* allocate sp objs */
3705 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3706 GFP_KERNEL);
3707 if (!bp->sp_objs)
3708 goto alloc_err;
3709
3710 /* allocate fp_stats */
3711 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3712 GFP_KERNEL);
3713 if (!bp->fp_stats)
3714 goto alloc_err;
3715
Merav Sicron65565882012-06-19 07:48:26 +00003716 /* Allocate memory for the transmission queues array */
3717 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3718#ifdef BCM_CNIC
3719 bp->bnx2x_txq_size++;
3720#endif
3721 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3722 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3723 if (!bp->bnx2x_txq)
3724 goto alloc_err;
3725
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003726 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003727 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003728 if (!tbl)
3729 goto alloc_err;
3730 bp->msix_table = tbl;
3731
3732 /* ilt */
3733 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3734 if (!ilt)
3735 goto alloc_err;
3736 bp->ilt = ilt;
3737
3738 return 0;
3739alloc_err:
3740 bnx2x_free_mem_bp(bp);
3741 return -ENOMEM;
3742
3743}
3744
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003745int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003746{
3747 struct bnx2x *bp = netdev_priv(dev);
3748
3749 if (unlikely(!netif_running(dev)))
3750 return 0;
3751
3752 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3753 return bnx2x_nic_load(bp, LOAD_NORMAL);
3754}
3755
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003756int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3757{
3758 u32 sel_phy_idx = 0;
3759 if (bp->link_params.num_phys <= 1)
3760 return INT_PHY;
3761
3762 if (bp->link_vars.link_up) {
3763 sel_phy_idx = EXT_PHY1;
3764 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3765 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3766 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3767 sel_phy_idx = EXT_PHY2;
3768 } else {
3769
3770 switch (bnx2x_phy_selection(&bp->link_params)) {
3771 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3772 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3773 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3774 sel_phy_idx = EXT_PHY1;
3775 break;
3776 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3777 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3778 sel_phy_idx = EXT_PHY2;
3779 break;
3780 }
3781 }
3782
3783 return sel_phy_idx;
 3784}
 3785
 3786int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3787{
3788 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3789 /*
 3790	 * The selected active PHY index is always the one after swapping (in
 3791	 * case PHY swapping is enabled). So when swapping is enabled, we need
 3792	 * to reverse the configuration
3793 */
3794
3795 if (bp->link_params.multi_phy_config &
3796 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3797 if (sel_phy_idx == EXT_PHY1)
3798 sel_phy_idx = EXT_PHY2;
3799 else if (sel_phy_idx == EXT_PHY2)
3800 sel_phy_idx = EXT_PHY1;
3801 }
3802 return LINK_CONFIG_IDX(sel_phy_idx);
3803}
3804
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003805#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3806int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3807{
3808 struct bnx2x *bp = netdev_priv(dev);
3809 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3810
3811 switch (type) {
3812 case NETDEV_FCOE_WWNN:
3813 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3814 cp->fcoe_wwn_node_name_lo);
3815 break;
3816 case NETDEV_FCOE_WWPN:
3817 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3818 cp->fcoe_wwn_port_name_lo);
3819 break;
3820 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003821 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003822 return -EINVAL;
3823 }
3824
3825 return 0;
3826}
3827#endif
3828
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003829/* called with rtnl_lock */
3830int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3831{
3832 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003833
3834 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003835 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003836 return -EAGAIN;
3837 }
3838
3839 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00003840 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3841 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003842 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003843 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003844
3845 /* This does not race with packet allocation
3846 * because the actual alloc size is
3847 * only updated as part of load
3848 */
3849 dev->mtu = new_mtu;
3850
Michał Mirosław66371c42011-04-12 09:38:23 +00003851 return bnx2x_reload_if_running(dev);
3852}
3853
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003854netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003855 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003856{
3857 struct bnx2x *bp = netdev_priv(dev);
3858
3859 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003860 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003861 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003862 features &= ~NETIF_F_GRO;
3863 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003864
3865 return features;
3866}
3867
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003868int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003869{
3870 struct bnx2x *bp = netdev_priv(dev);
3871 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003872 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003873
3874 if (features & NETIF_F_LRO)
3875 flags |= TPA_ENABLE_FLAG;
3876 else
3877 flags &= ~TPA_ENABLE_FLAG;
3878
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003879 if (features & NETIF_F_GRO)
3880 flags |= GRO_ENABLE_FLAG;
3881 else
3882 flags &= ~GRO_ENABLE_FLAG;
3883
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003884 if (features & NETIF_F_LOOPBACK) {
3885 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3886 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3887 bnx2x_reload = true;
3888 }
3889 } else {
3890 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3891 bp->link_params.loopback_mode = LOOPBACK_NONE;
3892 bnx2x_reload = true;
3893 }
3894 }
3895
Michał Mirosław66371c42011-04-12 09:38:23 +00003896 if (flags ^ bp->flags) {
3897 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003898 bnx2x_reload = true;
3899 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003900
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003901 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003902 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3903 return bnx2x_reload_if_running(dev);
3904 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003905 }
3906
Michał Mirosław66371c42011-04-12 09:38:23 +00003907 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003908}
3909
3910void bnx2x_tx_timeout(struct net_device *dev)
3911{
3912 struct bnx2x *bp = netdev_priv(dev);
3913
3914#ifdef BNX2X_STOP_ON_ERROR
3915 if (!bp->panic)
3916 bnx2x_panic();
3917#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003918
3919 smp_mb__before_clear_bit();
3920 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3921 smp_mb__after_clear_bit();
3922
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003923 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003924 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003925}
3926
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003927int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3928{
3929 struct net_device *dev = pci_get_drvdata(pdev);
3930 struct bnx2x *bp;
3931
3932 if (!dev) {
3933 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3934 return -ENODEV;
3935 }
3936 bp = netdev_priv(dev);
3937
3938 rtnl_lock();
3939
3940 pci_save_state(pdev);
3941
3942 if (!netif_running(dev)) {
3943 rtnl_unlock();
3944 return 0;
3945 }
3946
3947 netif_device_detach(dev);
3948
3949 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3950
3951 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3952
3953 rtnl_unlock();
3954
3955 return 0;
3956}
3957
3958int bnx2x_resume(struct pci_dev *pdev)
3959{
3960 struct net_device *dev = pci_get_drvdata(pdev);
3961 struct bnx2x *bp;
3962 int rc;
3963
3964 if (!dev) {
3965 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3966 return -ENODEV;
3967 }
3968 bp = netdev_priv(dev);
3969
3970 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003971 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003972 return -EAGAIN;
3973 }
3974
3975 rtnl_lock();
3976
3977 pci_restore_state(pdev);
3978
3979 if (!netif_running(dev)) {
3980 rtnl_unlock();
3981 return 0;
3982 }
3983
3984 bnx2x_set_power_state(bp, PCI_D0);
3985 netif_device_attach(dev);
3986
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003987 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3988
3989 rtnl_unlock();
3990
3991 return rc;
3992}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003993
3994
3995void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3996 u32 cid)
3997{
3998 /* ustorm cxt validation */
3999 cxt->ustorm_ag_context.cdu_usage =
4000 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4001 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4002 /* xcontext validation */
4003 cxt->xstorm_ag_context.cdu_reserved =
4004 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4005 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4006}
4007
Eric Dumazet1191cb82012-04-27 21:39:21 +00004008static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4009 u8 fw_sb_id, u8 sb_index,
4010 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004011{
4012
4013 u32 addr = BAR_CSTRORM_INTMEM +
4014 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4015 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004016 DP(NETIF_MSG_IFUP,
4017 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4018 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004019}
4020
Eric Dumazet1191cb82012-04-27 21:39:21 +00004021static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4022 u16 fw_sb_id, u8 sb_index,
4023 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004024{
4025 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4026 u32 addr = BAR_CSTRORM_INTMEM +
4027 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4028 u16 flags = REG_RD16(bp, addr);
4029 /* clear and set */
4030 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4031 flags |= enable_flag;
4032 REG_WR16(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004033 DP(NETIF_MSG_IFUP,
4034 "port %x fw_sb_id %d sb_index %d disable %d\n",
4035 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004036}
4037
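/* Program interrupt coalescing for one status-block index: the
 * microsecond value is converted to BNX2X_BTR ticks for the timeout,
 * and the HC_INDEX_DATA_HC_ENABLED flag is cleared (coalescing
 * disabled) whenever the caller asks for it or passes usec == 0.
 */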
4038void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4039 u8 sb_index, u8 disable, u16 usec)
4040{
4041 int port = BP_PORT(bp);
4042 u8 ticks = usec / BNX2X_BTR;
4043
4044 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4045
4046 disable = disable ? 1 : (usec ? 0 : 1);
4047 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4048}