Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Ariel Elior85b26ea2012-01-26 06:01:54 +00003 * Copyright (c) 2007-2012 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Joe Perchesf1deab52011-08-14 12:16:21 +000018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000021#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000022#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000023#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000024#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070025#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000026#include <linux/firmware.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000027#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000028#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000029#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000030#include "bnx2x_sp.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000031
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030032
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000033
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000034/**
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000035 * bnx2x_move_fp - move content of the fastpath structure.
36 *
37 * @bp: driver handle
38 * @from: source FP index
39 * @to: destination FP index
40 *
 41 * Makes sure the contents of bp->fp[to].napi are kept
Ariel Elior72754082011-11-13 04:34:31 +000042 * intact. This is done by first copying the napi struct from
43 * the target to the source, and then mem copying the entire
44 * source onto the target
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000045 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
Ariel Elior72754082011-11-13 04:34:31 +000050
51 /* Copy the NAPI object as it has been already initialized */
52 from_fp->napi = to_fp->napi;
53
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000054 /* Move bnx2x_fastpath contents */
55 memcpy(to_fp, from_fp, sizeof(*to_fp));
56 to_fp->index = to;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000057}
58
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030059int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
60
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000061/* free skb in the packet ring at pos idx
62 * return idx of last bd freed
63 */
Ariel Elior6383c0b2011-07-14 08:31:57 +000064static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +000065 u16 idx, unsigned int *pkts_compl,
66 unsigned int *bytes_compl)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000067{
Ariel Elior6383c0b2011-07-14 08:31:57 +000068 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000069 struct eth_tx_start_bd *tx_start_bd;
70 struct eth_tx_bd *tx_data_bd;
71 struct sk_buff *skb = tx_buf->skb;
72 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
73 int nbd;
74
75 /* prefetch skb end pointer to speedup dev_kfree_skb() */
76 prefetch(&skb->end);
77
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030078 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +000079 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000080
81 /* unmap first bd */
82 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +000083 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000084 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +000085 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000086
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030087
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000088 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
89#ifdef BNX2X_STOP_ON_ERROR
90 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
91 BNX2X_ERR("BAD nbd!\n");
92 bnx2x_panic();
93 }
94#endif
95 new_cons = nbd + tx_buf->first_bd;
96
97 /* Get the next bd */
98 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
99
100 /* Skip a parse bd... */
101 --nbd;
102 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
103
104 /* ...and the TSO split header bd since they have no mapping */
105 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
106 --nbd;
107 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
108 }
109
110 /* now free frags */
111 while (nbd > 0) {
112
113 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000114 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000115 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
116 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
117 if (--nbd)
118 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
119 }
120
121 /* release skb */
122 WARN_ON(!skb);
Tom Herbert2df1a702011-11-28 16:33:37 +0000123 if (skb) {
124 (*pkts_compl)++;
125 (*bytes_compl) += skb->len;
126 }
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000127 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000128 tx_buf->first_bd = 0;
129 tx_buf->skb = NULL;
130
131 return new_cons;
132}
133
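/* Process TX completions for a single txdata ring: walk the packets between
 * the software and hardware consumers, free their BDs, report the totals via
 * netdev_tx_completed_queue() and re-wake the netdev queue if it was stopped
 * and enough descriptors became available.
 */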
Ariel Elior6383c0b2011-07-14 08:31:57 +0000134int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000135{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000136 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000137 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Tom Herbert2df1a702011-11-28 16:33:37 +0000138 unsigned int pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000139
140#ifdef BNX2X_STOP_ON_ERROR
141 if (unlikely(bp->panic))
142 return -1;
143#endif
144
Ariel Elior6383c0b2011-07-14 08:31:57 +0000145 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
146 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
147 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000148
149 while (sw_cons != hw_cons) {
150 u16 pkt_cons;
151
152 pkt_cons = TX_BD(sw_cons);
153
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000154 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
155 " pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000156 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000157
Tom Herbert2df1a702011-11-28 16:33:37 +0000158 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
159 &pkts_compl, &bytes_compl);
160
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000161 sw_cons++;
162 }
163
Tom Herbert2df1a702011-11-28 16:33:37 +0000164 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
165
Ariel Elior6383c0b2011-07-14 08:31:57 +0000166 txdata->tx_pkt_cons = sw_cons;
167 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000168
169 /* Need to make the tx_bd_cons update visible to start_xmit()
170 * before checking for netif_tx_queue_stopped(). Without the
171 * memory barrier, there is a small possibility that
172 * start_xmit() will miss it and cause the queue to be stopped
173 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300174 * On the other hand we need an rmb() here to ensure the proper
175 * ordering of bit testing in the following
176 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000177 */
178 smp_mb();
179
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000180 if (unlikely(netif_tx_queue_stopped(txq))) {
181 /* Taking tx_lock() is needed to prevent reenabling the queue
 182 * while it's empty. This could happen if rx_action() gets
183 * suspended in bnx2x_tx_int() after the condition before
184 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
185 *
186 * stops the queue->sees fresh tx_bd_cons->releases the queue->
187 * sends some packets consuming the whole queue again->
188 * stops the queue
189 */
190
191 __netif_tx_lock(txq, smp_processor_id());
192
193 if ((netif_tx_queue_stopped(txq)) &&
194 (bp->state == BNX2X_STATE_OPEN) &&
Ariel Elior6383c0b2011-07-14 08:31:57 +0000195 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000196 netif_tx_wake_queue(txq);
197
198 __netif_tx_unlock(txq);
199 }
200 return 0;
201}
202
203static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
204 u16 idx)
205{
206 u16 last_max = fp->last_max_sge;
207
208 if (SUB_S16(idx, last_max) > 0)
209 fp->last_max_sge = idx;
210}
211
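/* Walk the SGL of a completed aggregation: clear the mask bits of the SGEs it
 * consumed, remember the highest SGE index seen and advance rx_sge_prod over
 * mask elements that are now fully reusable.
 */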
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000212static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
213 u16 sge_len,
214 struct eth_end_agg_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000215{
216 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000217 u16 last_max, last_elem, first_elem;
218 u16 delta = 0;
219 u16 i;
220
221 if (!sge_len)
222 return;
223
224 /* First mark all used pages */
225 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300226 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000227 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000228
229 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000230 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000231
232 /* Here we assume that the last SGE index is the biggest */
233 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000234 bnx2x_update_last_max_sge(fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000235 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000236
237 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300238 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
239 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000240
241 /* If ring is not full */
242 if (last_elem + 1 != first_elem)
243 last_elem++;
244
245 /* Now update the prod */
246 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
247 if (likely(fp->sge_mask[i]))
248 break;
249
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300250 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
251 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000252 }
253
254 if (delta > 0) {
255 fp->rx_sge_prod += delta;
256 /* clear page-end entries */
257 bnx2x_clear_sge_mask_next_elems(fp);
258 }
259
260 DP(NETIF_MSG_RX_STATUS,
261 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
262 fp->last_max_sge, fp->rx_sge_prod);
263}
264
Eric Dumazete52fcb22011-11-14 06:05:34 +0000265/* Set Toeplitz hash value in the skb using the value from the
266 * CQE (calculated by HW).
267 */
268static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
269 const struct eth_fast_path_rx_cqe *cqe)
270{
271 /* Set Toeplitz hash from CQE */
272 if ((bp->dev->features & NETIF_F_RXHASH) &&
273 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
274 return le32_to_cpu(cqe->rss_hash_result);
275 return 0;
276}
277
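/* Open a TPA aggregation bin: map the spare buffer kept in the bin's
 * first_buf into the producer BD, park the partially filled consumer buffer
 * in first_buf, and record the parsing flags, VLAN tag, rxhash and length
 * taken from the START CQE (plus gro_size/full_page in GRO mode).
 */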
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000278static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000279 u16 cons, u16 prod,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300280 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000281{
282 struct bnx2x *bp = fp->bp;
283 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
284 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
285 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
286 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300287 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
288 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000289
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300290 /* print error if current state != stop */
291 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000292 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
293
Eric Dumazete52fcb22011-11-14 06:05:34 +0000294 /* Try to map an empty data buffer from the aggregation info */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300295 mapping = dma_map_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000296 first_buf->data + NET_SKB_PAD,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300297 fp->rx_buf_size, DMA_FROM_DEVICE);
298 /*
299 * ...if it fails - move the skb from the consumer to the producer
300 * and set the current aggregation state as ERROR to drop it
301 * when TPA_STOP arrives.
302 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000303
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300304 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
305 /* Move the BD from the consumer to the producer */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000306 bnx2x_reuse_rx_data(fp, cons, prod);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300307 tpa_info->tpa_state = BNX2X_TPA_ERROR;
308 return;
309 }
310
Eric Dumazete52fcb22011-11-14 06:05:34 +0000311 /* move empty data from pool to prod */
312 prod_rx_buf->data = first_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300313 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000314 /* point prod_bd to new data */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000315 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
316 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
317
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300318 /* move partial skb from cons to pool (don't unmap yet) */
319 *first_buf = *cons_rx_buf;
320
321 /* mark bin state as START */
322 tpa_info->parsing_flags =
323 le16_to_cpu(cqe->pars_flags.flags);
324 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
325 tpa_info->tpa_state = BNX2X_TPA_START;
326 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
327 tpa_info->placement_offset = cqe->placement_offset;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000328 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000329 if (fp->mode == TPA_MODE_GRO) {
330 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
331 tpa_info->full_page =
332 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
Dmitry Kravkovfe603b42012-02-20 09:59:11 +0000333 /*
334 * FW 7.2.16 BUG workaround:
 335 * if the SGE size is an exact multiple of gro_size,
 336 * FW will place one less frag on the SGE.
 337 * The calculation is done only for potentially
338 * dangerous MTUs.
339 */
340 if (unlikely(bp->gro_check))
341 if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
342 tpa_info->full_page -= gro_size;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000343 tpa_info->gro_size = gro_size;
344 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300345
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000346#ifdef BNX2X_STOP_ON_ERROR
347 fp->tpa_queue_used |= (1 << queue);
348#ifdef _ASM_GENERIC_INT_L64_H
349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
350#else
351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
352#endif
353 fp->tpa_queue_used);
354#endif
355}
356
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000357/* Timestamp option length allowed for TPA aggregation:
358 *
359 * nop nop kind length echo val
360 */
361#define TPA_TSTAMP_OPT_LEN 12
362/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000363 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000364 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000365 * @bp: driver handle
366 * @parsing_flags: parsing flags from the START CQE
367 * @len_on_bd: total length of the first packet for the
368 * aggregation.
369 *
370 * Approximate value of the MSS for this aggregation calculated using
371 * the first packet of it.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000372 */
373static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
374 u16 len_on_bd)
375{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300376 /*
 377 * A TPA aggregation won't have IP options or TCP options
 378 * other than a timestamp, or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000379 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300380 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
381
382 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
383 PRS_FLAG_OVERETH_IPV6)
384 hdrs_len += sizeof(struct ipv6hdr);
385 else /* IPv4 */
386 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000387
388
 389 /* Check if there was a TCP timestamp; if there is, it will
 390 * always be 12 bytes long: nop nop kind length echo val.
391 *
392 * Otherwise FW would close the aggregation.
393 */
394 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
395 hdrs_len += TPA_TSTAMP_OPT_LEN;
396
397 return len_on_bd - hdrs_len;
398}
399
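/* Attach the SGE pages of a finished aggregation to the skb as page
 * fragments (LRO: one frag per SGE chunk, GRO: gro_size-sized frags), set
 * gso_size (and gso_type in GRO mode) so the stack can forward the packet,
 * and replace each consumed page in the RX SGE ring.
 */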
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000400static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000401 struct bnx2x_agg_info *tpa_info,
402 u16 pages,
403 struct sk_buff *skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300404 struct eth_end_agg_rx_cqe *cqe,
405 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000406{
407 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000408 u32 i, frag_len, frag_size;
409 int err, j, frag_id = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300410 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000411 u16 full_page = 0, gro_size = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000412
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300413 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000414
415 if (fp->mode == TPA_MODE_GRO) {
416 gro_size = tpa_info->gro_size;
417 full_page = tpa_info->full_page;
418 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000419
420 /* This is needed in order to enable forwarding support */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000421 if (frag_size) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300422 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
423 tpa_info->parsing_flags, len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000424
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000425 /* set for GRO */
426 if (fp->mode == TPA_MODE_GRO)
427 skb_shinfo(skb)->gso_type =
428 (GET_FLAG(tpa_info->parsing_flags,
429 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
430 PRS_FLAG_OVERETH_IPV6) ?
431 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
432 }
433
434
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000435#ifdef BNX2X_STOP_ON_ERROR
436 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
437 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
438 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300439 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000440 bnx2x_panic();
441 return -EINVAL;
442 }
443#endif
444
445 /* Run through the SGL and compose the fragmented skb */
446 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300447 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000448
449 /* FW gives the indices of the SGE as if the ring is an array
450 (meaning that "next" element will consume 2 indices) */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000451 if (fp->mode == TPA_MODE_GRO)
452 frag_len = min_t(u32, frag_size, (u32)full_page);
453 else /* LRO */
454 frag_len = min_t(u32, frag_size,
455 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
456
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000457 rx_pg = &fp->rx_page_ring[sge_idx];
458 old_rx_pg = *rx_pg;
459
460 /* If we fail to allocate a substitute page, we simply stop
461 where we are and drop the whole packet */
462 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
463 if (unlikely(err)) {
464 fp->eth_q_stats.rx_skb_alloc_failed++;
465 return err;
466 }
467
 468 /* Unmap the page as we are going to pass it to the stack */
469 dma_unmap_page(&bp->pdev->dev,
470 dma_unmap_addr(&old_rx_pg, mapping),
471 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000472 /* Add one frag and update the appropriate fields in the skb */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000473 if (fp->mode == TPA_MODE_LRO)
474 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
475 else { /* GRO */
476 int rem;
477 int offset = 0;
478 for (rem = frag_len; rem > 0; rem -= gro_size) {
479 int len = rem > gro_size ? gro_size : rem;
480 skb_fill_page_desc(skb, frag_id++,
481 old_rx_pg.page, offset, len);
482 if (offset)
483 get_page(old_rx_pg.page);
484 offset += len;
485 }
486 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000487
488 skb->data_len += frag_len;
Eric Dumazete1ac50f2011-10-19 23:00:23 +0000489 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000490 skb->len += frag_len;
491
492 frag_size -= frag_len;
493 }
494
495 return 0;
496}
497
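/* Close a TPA aggregation bin: allocate a replacement buffer, build an skb
 * around the aggregated data, attach the SGE frags and hand the result to
 * napi_gro_receive(); on an allocation or mapping error the aggregation is
 * dropped and the buffer stays in the bin.
 */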
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000498static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
499 struct bnx2x_agg_info *tpa_info,
500 u16 pages,
501 struct eth_end_agg_rx_cqe *cqe,
502 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000503{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300504 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000505 u8 pad = tpa_info->placement_offset;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300506 u16 len = tpa_info->len_on_bd;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000507 struct sk_buff *skb = NULL;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000508 u8 *new_data, *data = rx_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300509 u8 old_tpa_state = tpa_info->tpa_state;
510
511 tpa_info->tpa_state = BNX2X_TPA_STOP;
512
 513 /* If there was an error during the handling of the TPA_START -
514 * drop this aggregation.
515 */
516 if (old_tpa_state == BNX2X_TPA_ERROR)
517 goto drop;
518
Eric Dumazete52fcb22011-11-14 06:05:34 +0000519 /* Try to allocate the new data */
520 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000521
522 /* Unmap skb in the pool anyway, as we are going to change
523 pool entry status to BNX2X_TPA_STOP even if new skb allocation
524 fails. */
525 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800526 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000527 if (likely(new_data))
528 skb = build_skb(data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000529
Eric Dumazete52fcb22011-11-14 06:05:34 +0000530 if (likely(skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000531#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800532 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000533 BNX2X_ERR("skb_put is about to fail... "
534 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800535 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000536 bnx2x_panic();
537 return;
538 }
539#endif
540
Eric Dumazete52fcb22011-11-14 06:05:34 +0000541 skb_reserve(skb, pad + NET_SKB_PAD);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000542 skb_put(skb, len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000543 skb->rxhash = tpa_info->rxhash;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000544
545 skb->protocol = eth_type_trans(skb, bp->dev);
546 skb->ip_summed = CHECKSUM_UNNECESSARY;
547
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000548 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
549 skb, cqe, cqe_idx)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300550 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
551 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
Hao Zheng9bcc0892010-10-20 13:56:11 +0000552 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000553 } else {
554 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
555 " - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000556 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000557 }
558
559
Eric Dumazete52fcb22011-11-14 06:05:34 +0000560 /* put new data in bin */
561 rx_buf->data = new_data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000562
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300563 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000564 }
Jesper Juhl3f61cd82012-02-06 11:28:21 +0000565 kfree(new_data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300566drop:
567 /* drop the packet and keep the buffer in the bin */
568 DP(NETIF_MSG_RX_STATUS,
569 "Failed to allocate or map a new skb - dropping packet!\n");
570 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000571}
572
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000573
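/* Main RX polling loop: consume up to @budget completions from the RCQ,
 * dispatching slowpath CQEs, TPA start/stop events and regular packets
 * (small frames are copied when a large MTU is in use, others are rebuilt
 * with build_skb()), then publish the new BD/CQE/SGE producers.
 */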
574int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
575{
576 struct bnx2x *bp = fp->bp;
577 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
578 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
579 int rx_pkt = 0;
580
581#ifdef BNX2X_STOP_ON_ERROR
582 if (unlikely(bp->panic))
583 return 0;
584#endif
585
586 /* CQ "next element" is of the size of the regular element,
587 that's why it's ok here */
588 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
589 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
590 hw_comp_cons++;
591
592 bd_cons = fp->rx_bd_cons;
593 bd_prod = fp->rx_bd_prod;
594 bd_prod_fw = bd_prod;
595 sw_comp_cons = fp->rx_comp_cons;
596 sw_comp_prod = fp->rx_comp_prod;
597
598 /* Memory barrier necessary as speculative reads of the rx
599 * buffer can be ahead of the index in the status block
600 */
601 rmb();
602
603 DP(NETIF_MSG_RX_STATUS,
604 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
605 fp->index, hw_comp_cons, sw_comp_cons);
606
607 while (sw_comp_cons != hw_comp_cons) {
608 struct sw_rx_bd *rx_buf = NULL;
609 struct sk_buff *skb;
610 union eth_rx_cqe *cqe;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300611 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000612 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300613 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000614 u16 len, pad, queue;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000615 u8 *data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000616
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300617#ifdef BNX2X_STOP_ON_ERROR
618 if (unlikely(bp->panic))
619 return 0;
620#endif
621
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000622 comp_ring_cons = RCQ_BD(sw_comp_cons);
623 bd_prod = RX_BD(bd_prod);
624 bd_cons = RX_BD(bd_cons);
625
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000626 cqe = &fp->rx_comp_ring[comp_ring_cons];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300627 cqe_fp = &cqe->fast_path_cqe;
628 cqe_fp_flags = cqe_fp->type_error_flags;
629 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000630
631 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
632 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300633 cqe_fp_flags, cqe_fp->status_flags,
634 le32_to_cpu(cqe_fp->rss_hash_result),
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000635 le16_to_cpu(cqe_fp->vlan_tag),
636 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000637
638 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300639 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000640 bnx2x_sp_event(fp, cqe);
641 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000642 }
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000643
Eric Dumazete52fcb22011-11-14 06:05:34 +0000644 rx_buf = &fp->rx_buf_ring[bd_cons];
645 data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000646
Eric Dumazete52fcb22011-11-14 06:05:34 +0000647 if (!CQE_TYPE_FAST(cqe_fp_type)) {
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000648 struct bnx2x_agg_info *tpa_info;
649 u16 frag_size, pages;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300650#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000651 /* sanity check */
652 if (fp->disable_tpa &&
653 (CQE_TYPE_START(cqe_fp_type) ||
654 CQE_TYPE_STOP(cqe_fp_type)))
655 BNX2X_ERR("START/STOP packet while "
656 "disable_tpa type %x\n",
657 CQE_TYPE(cqe_fp_type));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300658#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000659
Eric Dumazete52fcb22011-11-14 06:05:34 +0000660 if (CQE_TYPE_START(cqe_fp_type)) {
661 u16 queue = cqe_fp->queue_index;
662 DP(NETIF_MSG_RX_STATUS,
663 "calling tpa_start on queue %d\n",
664 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000665
Eric Dumazete52fcb22011-11-14 06:05:34 +0000666 bnx2x_tpa_start(fp, queue,
667 bd_cons, bd_prod,
668 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000669
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000670 goto next_rx;
671
672 }
673 queue = cqe->end_agg_cqe.queue_index;
674 tpa_info = &fp->tpa_info[queue];
675 DP(NETIF_MSG_RX_STATUS,
676 "calling tpa_stop on queue %d\n",
677 queue);
678
679 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
680 tpa_info->len_on_bd;
681
682 if (fp->mode == TPA_MODE_GRO)
683 pages = (frag_size + tpa_info->full_page - 1) /
684 tpa_info->full_page;
685 else
686 pages = SGE_PAGE_ALIGN(frag_size) >>
687 SGE_PAGE_SHIFT;
688
689 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
690 &cqe->end_agg_cqe, comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000691#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000692 if (bp->panic)
693 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000694#endif
695
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000696 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
697 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000698 }
699 /* non TPA */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000700 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000701 pad = cqe_fp->placement_offset;
702 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000703 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +0000704 pad + RX_COPY_THRESH,
705 DMA_FROM_DEVICE);
706 pad += NET_SKB_PAD;
707 prefetch(data + pad); /* speedup eth_type_trans() */
708 /* is this an error packet? */
709 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
710 DP(NETIF_MSG_RX_ERR,
711 "ERROR flags %x rx packet %u\n",
712 cqe_fp_flags, sw_comp_cons);
713 fp->eth_q_stats.rx_err_discard_pkt++;
714 goto reuse_rx;
715 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000716
Eric Dumazete52fcb22011-11-14 06:05:34 +0000717 /* Since we don't have a jumbo ring,
718 * copy small packets if mtu > 1500
719 */
720 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
721 (len <= RX_COPY_THRESH)) {
722 skb = netdev_alloc_skb_ip_align(bp->dev, len);
723 if (skb == NULL) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000724 DP(NETIF_MSG_RX_ERR,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000725 "ERROR packet dropped because of alloc failure\n");
726 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000727 goto reuse_rx;
728 }
Eric Dumazete52fcb22011-11-14 06:05:34 +0000729 memcpy(skb->data, data + pad, len);
730 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
731 } else {
732 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000733 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000734 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800735 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000736 DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000737 skb = build_skb(data);
738 if (unlikely(!skb)) {
739 kfree(data);
740 fp->eth_q_stats.rx_skb_alloc_failed++;
741 goto next_rx;
742 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000743 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000744 } else {
745 DP(NETIF_MSG_RX_ERR,
746 "ERROR packet dropped because "
747 "of alloc failure\n");
748 fp->eth_q_stats.rx_skb_alloc_failed++;
749reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000750 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000751 goto next_rx;
752 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000753 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000754
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000755 skb_put(skb, len);
756 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000757
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000758 /* Set Toeplitz hash for a non-LRO skb */
759 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000760
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000761 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000762
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000763 if (bp->dev->features & NETIF_F_RXCSUM) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300764
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000765 if (likely(BNX2X_RX_CSUM_OK(cqe)))
766 skb->ip_summed = CHECKSUM_UNNECESSARY;
767 else
768 fp->eth_q_stats.hw_csum_err++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000769 }
770
Dmitry Kravkovf233caf2011-11-13 04:34:22 +0000771 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000772
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300773 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
774 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000775 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300776 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000777 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000778
779
780next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000781 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000782
783 bd_cons = NEXT_RX_IDX(bd_cons);
784 bd_prod = NEXT_RX_IDX(bd_prod);
785 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
786 rx_pkt++;
787next_cqe:
788 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
789 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
790
791 if (rx_pkt == budget)
792 break;
793 } /* while */
794
795 fp->rx_bd_cons = bd_cons;
796 fp->rx_bd_prod = bd_prod_fw;
797 fp->rx_comp_cons = sw_comp_cons;
798 fp->rx_comp_prod = sw_comp_prod;
799
800 /* Update producers */
801 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
802 fp->rx_sge_prod);
803
804 fp->rx_pkt += rx_pkt;
805 fp->rx_calls++;
806
807 return rx_pkt;
808}
809
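/* MSI-X fastpath interrupt handler: disable further interrupts from this
 * status block, prefetch the TX/RX consumers and let NAPI do the work.
 */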
810static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
811{
812 struct bnx2x_fastpath *fp = fp_cookie;
813 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000814 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000815
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000816 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
817 "[fp %d fw_sd %d igusb %d]\n",
818 fp->index, fp->fw_sb_id, fp->igu_sb_id);
819 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000820
821#ifdef BNX2X_STOP_ON_ERROR
822 if (unlikely(bp->panic))
823 return IRQ_HANDLED;
824#endif
825
826 /* Handle Rx and Tx according to MSI-X vector */
827 prefetch(fp->rx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000828
829 for_each_cos_in_tx_queue(fp, cos)
830 prefetch(fp->txdata[cos].tx_cons_sb);
831
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000832 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000833 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
834
835 return IRQ_HANDLED;
836}
837
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000838/* HW Lock for shared dual port PHYs */
839void bnx2x_acquire_phy_lock(struct bnx2x *bp)
840{
841 mutex_lock(&bp->port.phy_mutex);
842
843 if (bp->port.need_hw_lock)
844 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
845}
846
847void bnx2x_release_phy_lock(struct bnx2x *bp)
848{
849 if (bp->port.need_hw_lock)
850 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
851
852 mutex_unlock(&bp->port.phy_mutex);
853}
854
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800855/* calculates MF speed according to current linespeed and MF configuration */
856u16 bnx2x_get_mf_speed(struct bnx2x *bp)
857{
858 u16 line_speed = bp->link_vars.line_speed;
859 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000860 u16 maxCfg = bnx2x_extract_max_cfg(bp,
861 bp->mf_config[BP_VN(bp)]);
862
863 /* Calculate the current MAX line speed limit for the MF
864 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800865 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000866 if (IS_MF_SI(bp))
867 line_speed = (line_speed * maxCfg) / 100;
868 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800869 u16 vn_max_rate = maxCfg * 100;
870
871 if (vn_max_rate < line_speed)
872 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000873 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800874 }
875
876 return line_speed;
877}
878
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000879/**
880 * bnx2x_fill_report_data - fill link report data to report
881 *
882 * @bp: driver handle
883 * @data: link state to update
884 *
 885 * It uses non-atomic bit operations because it is called under the mutex.
886 */
887static inline void bnx2x_fill_report_data(struct bnx2x *bp,
888 struct bnx2x_link_report_data *data)
889{
890 u16 line_speed = bnx2x_get_mf_speed(bp);
891
892 memset(data, 0, sizeof(*data));
893
 894 /* Fill the report data: effective line speed */
895 data->line_speed = line_speed;
896
897 /* Link is down */
898 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
899 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
900 &data->link_report_flags);
901
902 /* Full DUPLEX */
903 if (bp->link_vars.duplex == DUPLEX_FULL)
904 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
905
906 /* Rx Flow Control is ON */
907 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
908 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
909
910 /* Tx Flow Control is ON */
911 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
912 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
913}
914
915/**
916 * bnx2x_link_report - report link status to OS.
917 *
918 * @bp: driver handle
919 *
920 * Calls the __bnx2x_link_report() under the same locking scheme
921 * as a link/PHY state managing code to ensure a consistent link
922 * reporting.
923 */
924
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000925void bnx2x_link_report(struct bnx2x *bp)
926{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000927 bnx2x_acquire_phy_lock(bp);
928 __bnx2x_link_report(bp);
929 bnx2x_release_phy_lock(bp);
930}
931
932/**
933 * __bnx2x_link_report - report link status to OS.
934 *
935 * @bp: driver handle
936 *
 937 * Non-atomic implementation.
938 * Should be called under the phy_lock.
939 */
940void __bnx2x_link_report(struct bnx2x *bp)
941{
942 struct bnx2x_link_report_data cur_data;
943
944 /* reread mf_cfg */
945 if (!CHIP_IS_E1(bp))
946 bnx2x_read_mf_cfg(bp);
947
948 /* Read the current link report info */
949 bnx2x_fill_report_data(bp, &cur_data);
950
951 /* Don't report link down or exactly the same link status twice */
952 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
953 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
954 &bp->last_reported_link.link_report_flags) &&
955 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
956 &cur_data.link_report_flags)))
957 return;
958
959 bp->link_cnt++;
960
 961 /* We are going to report new link parameters now -
962 * remember the current data for the next time.
963 */
964 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
965
966 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
967 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000968 netif_carrier_off(bp->dev);
969 netdev_err(bp->dev, "NIC Link is Down\n");
970 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000971 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000972 const char *duplex;
973 const char *flow;
974
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000975 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000976
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000977 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
978 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000979 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000980 else
Joe Perches94f05b02011-08-14 12:16:20 +0000981 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000982
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000983 /* Handle the FC at the end so that only these flags would be
984 * possibly set. This way we may easily check if there is no FC
985 * enabled.
986 */
987 if (cur_data.link_report_flags) {
988 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
989 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000990 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
991 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000992 flow = "ON - receive & transmit";
993 else
994 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000995 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000996 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000997 }
Joe Perches94f05b02011-08-14 12:16:20 +0000998 } else {
999 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001000 }
Joe Perches94f05b02011-08-14 12:16:20 +00001001 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1002 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001003 }
1004}
1005
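/* Prepare the RX rings before traffic starts: fill the per-queue TPA
 * first-buffer pool, allocate and link the SGE pages, and finally publish
 * the initial BD/CQE/SGE producers (which already triggers an interrupt
 * towards the TSTORM, so the chip must be initialized first).
 */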
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001006void bnx2x_init_rx_rings(struct bnx2x *bp)
1007{
1008 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001009 u16 ring_prod;
1010 int i, j;
1011
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001012 /* Allocate TPA resources */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001013 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001014 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001015
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001016 DP(NETIF_MSG_IFUP,
1017 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1018
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001019 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001020 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001021 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001022 struct bnx2x_agg_info *tpa_info =
1023 &fp->tpa_info[i];
1024 struct sw_rx_bd *first_buf =
1025 &tpa_info->first_buf;
1026
Eric Dumazete52fcb22011-11-14 06:05:34 +00001027 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1028 GFP_ATOMIC);
1029 if (!first_buf->data) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001030 BNX2X_ERR("Failed to allocate TPA "
1031 "skb pool for queue[%d] - "
1032 "disabling TPA on this "
1033 "queue!\n", j);
1034 bnx2x_free_tpa_pool(bp, fp, i);
1035 fp->disable_tpa = 1;
1036 break;
1037 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001038 dma_unmap_addr_set(first_buf, mapping, 0);
1039 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001040 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001041
1042 /* "next page" elements initialization */
1043 bnx2x_set_next_page_sgl(fp);
1044
1045 /* set SGEs bit mask */
1046 bnx2x_init_sge_ring_bit_mask(fp);
1047
1048 /* Allocate SGEs and initialize the ring elements */
1049 for (i = 0, ring_prod = 0;
1050 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1051
1052 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1053 BNX2X_ERR("was only able to allocate "
1054 "%d rx sges\n", i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001055 BNX2X_ERR("disabling TPA for "
1056 "queue[%d]\n", j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001057 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001058 bnx2x_free_rx_sge_range(bp, fp,
1059 ring_prod);
1060 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001061 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001062 fp->disable_tpa = 1;
1063 ring_prod = 0;
1064 break;
1065 }
1066 ring_prod = NEXT_SGE_IDX(ring_prod);
1067 }
1068
1069 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001070 }
1071 }
1072
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001073 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001074 struct bnx2x_fastpath *fp = &bp->fp[j];
1075
1076 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001077
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001078 /* Activate BD ring */
1079 /* Warning!
1080 * this will generate an interrupt (to the TSTORM)
1081 * must only be done after chip is initialized
1082 */
1083 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1084 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001085
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001086 if (j != 0)
1087 continue;
1088
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001089 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001090 REG_WR(bp, BAR_USTRORM_INTMEM +
1091 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1092 U64_LO(fp->rx_comp_mapping));
1093 REG_WR(bp, BAR_USTRORM_INTMEM +
1094 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1095 U64_HI(fp->rx_comp_mapping));
1096 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001097 }
1098}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001099
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001100static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1101{
1102 int i;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001103 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001104
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001105 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001106 struct bnx2x_fastpath *fp = &bp->fp[i];
Ariel Elior6383c0b2011-07-14 08:31:57 +00001107 for_each_cos_in_tx_queue(fp, cos) {
1108 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
Tom Herbert2df1a702011-11-28 16:33:37 +00001109 unsigned pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001110
Ariel Elior6383c0b2011-07-14 08:31:57 +00001111 u16 sw_prod = txdata->tx_pkt_prod;
1112 u16 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001113
Ariel Elior6383c0b2011-07-14 08:31:57 +00001114 while (sw_cons != sw_prod) {
Tom Herbert2df1a702011-11-28 16:33:37 +00001115 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1116 &pkts_compl, &bytes_compl);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001117 sw_cons++;
1118 }
Tom Herbert2df1a702011-11-28 16:33:37 +00001119 netdev_tx_reset_queue(
1120 netdev_get_tx_queue(bp->dev, txdata->txq_index));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001121 }
1122 }
1123}
1124
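/* Unmap and free every data buffer still referenced by the RX BD ring;
 * if the ring was never allocated there is nothing to do.
 */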
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001125static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1126{
1127 struct bnx2x *bp = fp->bp;
1128 int i;
1129
1130 /* ring wasn't allocated */
1131 if (fp->rx_buf_ring == NULL)
1132 return;
1133
1134 for (i = 0; i < NUM_RX_BD; i++) {
1135 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001136 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001137
Eric Dumazete52fcb22011-11-14 06:05:34 +00001138 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001139 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001140 dma_unmap_single(&bp->pdev->dev,
1141 dma_unmap_addr(rx_buf, mapping),
1142 fp->rx_buf_size, DMA_FROM_DEVICE);
1143
Eric Dumazete52fcb22011-11-14 06:05:34 +00001144 rx_buf->data = NULL;
1145 kfree(data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001146 }
1147}
1148
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001149static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1150{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001151 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001152
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001153 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001154 struct bnx2x_fastpath *fp = &bp->fp[j];
1155
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001156 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001157
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001158 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001159 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001160 }
1161}
1162
1163void bnx2x_free_skbs(struct bnx2x *bp)
1164{
1165 bnx2x_free_tx_skbs(bp);
1166 bnx2x_free_rx_skbs(bp);
1167}
1168
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001169void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1170{
1171 /* load old values */
1172 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1173
1174 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1175 /* leave all but MAX value */
1176 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1177
1178 /* set new MAX value */
1179 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1180 & FUNC_MF_CFG_MAX_BW_MASK;
1181
1182 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1183 }
1184}
1185
Dmitry Kravkovca924292011-06-14 01:33:08 +00001186/**
1187 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1188 *
1189 * @bp: driver handle
1190 * @nvecs: number of vectors to be released
1191 */
1192static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001193{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001194 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001195
Dmitry Kravkovca924292011-06-14 01:33:08 +00001196 if (nvecs == offset)
1197 return;
1198 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001199 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001200 bp->msix_table[offset].vector);
1201 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001202#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001203 if (nvecs == offset)
1204 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001205 offset++;
1206#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001207
Dmitry Kravkovca924292011-06-14 01:33:08 +00001208 for_each_eth_queue(bp, i) {
1209 if (nvecs == offset)
1210 return;
1211 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1212 "irq\n", i, bp->msix_table[offset].vector);
1213
1214 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001215 }
1216}
1217
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001218void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001219{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001220 if (bp->flags & USING_MSIX_FLAG)
Dmitry Kravkovca924292011-06-14 01:33:08 +00001221 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001222 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001223 else if (bp->flags & USING_MSI_FLAG)
1224 free_irq(bp->pdev->irq, bp->dev);
1225 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001226 free_irq(bp->pdev->irq, bp->dev);
1227}
1228
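/* Request MSI-X vectors: one for slowpath, one for CNIC when built in, and
 * one per ETH queue.  If the PCI layer grants fewer vectors, the number of
 * queues is reduced accordingly; on -ENOMEM the driver sets DISABLE_MSI_FLAG
 * so that it falls back to INTx.
 */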
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001229int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001230{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001231 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001232
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001233 bp->msix_table[msix_vec].entry = msix_vec;
1234 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1235 bp->msix_table[0].entry);
1236 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001237
1238#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001239 bp->msix_table[msix_vec].entry = msix_vec;
1240 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1241 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1242 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001243#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001244 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001245 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001246 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001247 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001248 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1249 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001250 }
1251
Ariel Elior6383c0b2011-07-14 08:31:57 +00001252 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001253
1254 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001255
1256 /*
1257 * reconfigure number of tx/rx queues according to available
1258 * MSI-X vectors
1259 */
1260 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001261 /* how many fewer vectors will we have? */
1262 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001263
1264 DP(NETIF_MSG_IFUP,
1265 "Trying to use less MSI-X vectors: %d\n", rc);
1266
1267 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1268
1269 if (rc) {
1270 DP(NETIF_MSG_IFUP,
1271 "MSI-X is not attainable rc %d\n", rc);
1272 return rc;
1273 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001274 /*
1275 * decrease number of queues by number of unallocated entries
1276 */
1277 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001278
1279 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1280 bp->num_queues);
1281 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001282 /* fall to INTx if not enough memory */
1283 if (rc == -ENOMEM)
1284 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001285 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1286 return rc;
1287 }
1288
1289 bp->flags |= USING_MSIX_FLAG;
1290
1291 return 0;
1292}
1293
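/* Attach handlers to the vectors obtained by bnx2x_enable_msix(): slowpath
 * first, then (optionally) CNIC, then one per ETH fastpath; on failure every
 * vector requested so far is released.
 */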
1294static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1295{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001296 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001297
Dmitry Kravkovca924292011-06-14 01:33:08 +00001298 rc = request_irq(bp->msix_table[offset++].vector,
1299 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001300 bp->dev->name, bp->dev);
1301 if (rc) {
1302 BNX2X_ERR("request sp irq failed\n");
1303 return -EBUSY;
1304 }
1305
1306#ifdef BCM_CNIC
1307 offset++;
1308#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001309 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001310 struct bnx2x_fastpath *fp = &bp->fp[i];
1311 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1312 bp->dev->name, i);
1313
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001314 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001315 bnx2x_msix_fp_int, 0, fp->name, fp);
1316 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001317 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1318 bp->msix_table[offset].vector, rc);
1319 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001320 return -EBUSY;
1321 }
1322
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001323 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001324 }
1325
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001326 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001327 offset = 1 + CNIC_PRESENT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001328 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1329 " ... fp[%d] %d\n",
1330 bp->msix_table[0].vector,
1331 0, bp->msix_table[offset].vector,
1332 i - 1, bp->msix_table[offset + i - 1].vector);
1333
1334 return 0;
1335}
1336
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001337int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001338{
1339 int rc;
1340
1341 rc = pci_enable_msi(bp->pdev);
1342 if (rc) {
1343 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1344 return -1;
1345 }
1346 bp->flags |= USING_MSI_FLAG;
1347
1348 return 0;
1349}
1350
1351static int bnx2x_req_irq(struct bnx2x *bp)
1352{
1353 unsigned long flags;
1354 int rc;
1355
1356 if (bp->flags & USING_MSI_FLAG)
1357 flags = 0;
1358 else
1359 flags = IRQF_SHARED;
1360
1361 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1362 bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001363 return rc;
1364}
1365
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001366static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1367{
1368 int rc = 0;
1369 if (bp->flags & USING_MSIX_FLAG) {
1370 rc = bnx2x_req_msix_irqs(bp);
1371 if (rc)
1372 return rc;
1373 } else {
1374 bnx2x_ack_int(bp);
1375 rc = bnx2x_req_irq(bp);
1376 if (rc) {
1377 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1378 return rc;
1379 }
1380 if (bp->flags & USING_MSI_FLAG) {
1381 bp->dev->irq = bp->pdev->irq;
1382 netdev_info(bp->dev, "using MSI IRQ %d\n",
1383 bp->pdev->irq);
1384 }
1385 }
1386
1387 return 0;
1388}
1389
1390static inline void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001391{
1392 int i;
1393
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001394 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001395 napi_enable(&bnx2x_fp(bp, i, napi));
1396}
1397
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001398static inline void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001399{
1400 int i;
1401
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001402 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001403 napi_disable(&bnx2x_fp(bp, i, napi));
1404}
1405
1406void bnx2x_netif_start(struct bnx2x *bp)
1407{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001408 if (netif_running(bp->dev)) {
1409 bnx2x_napi_enable(bp);
1410 bnx2x_int_enable(bp);
1411 if (bp->state == BNX2X_STATE_OPEN)
1412 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001413 }
1414}
1415
1416void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1417{
1418 bnx2x_int_disable_sync(bp, disable_hw);
1419 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001420}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001421
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001422u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1423{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001424 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001425
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001426#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001427 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001428 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1429 u16 ether_type = ntohs(hdr->h_proto);
1430
1431 /* Skip VLAN tag if present */
1432 if (ether_type == ETH_P_8021Q) {
1433 struct vlan_ethhdr *vhdr =
1434 (struct vlan_ethhdr *)skb->data;
1435
1436 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1437 }
1438
1439 /* If ethertype is FCoE or FIP - use FCoE ring */
1440 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001441 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001442 }
1443#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001444 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001445 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001446}
1447
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001448void bnx2x_set_num_queues(struct bnx2x *bp)
1449{
1450 switch (bp->multi_mode) {
1451 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001452 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001453 break;
1454 case ETH_RSS_MODE_REGULAR:
1455 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001456 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001457
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001458 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001459 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001460 break;
1461 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001462
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001463#ifdef BCM_CNIC
1464	/* override in ISCSI SD mode */
1465 if (IS_MF_ISCSI_SD(bp))
1466 bp->num_queues = 1;
1467#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001468 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001469 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001470}
1471
David S. Miller823dcd22011-08-20 10:39:12 -07001472/**
1473 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1474 *
1475 * @bp: Driver handle
1476 *
1477 * We currently support at most 16 Tx queues for each CoS, thus we will
1478 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1479 * bp->max_cos.
1480 *
1481 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1482 * index after all ETH L2 indices.
1483 *
1484 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1485 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1486 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1487 *
1488 * The proper configuration of skb->queue_mapping is handled by
1489 * bnx2x_select_queue() and __skb_tx_hash().
1490 *
1491 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1492 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1493 */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001494static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1495{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001496 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001497
Ariel Elior6383c0b2011-07-14 08:31:57 +00001498 tx = MAX_TXQS_PER_COS * bp->max_cos;
1499 rx = BNX2X_NUM_ETH_QUEUES(bp);
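	/*
	 * Illustrative example (values assumed, not from the source): with
	 * max_cos = 2 and 8 ETH queues, tx = 16 * 2 = 32 indices are
	 * exposed so each CoS keeps its own aligned group of 16, even
	 * though only 8 indices per group are backed by real rings.
	 */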
1500
1501/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001502#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001503 if (!NO_FCOE(bp)) {
1504 rx += FCOE_PRESENT;
1505 tx += FCOE_PRESENT;
1506 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001507#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001508
1509 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1510 if (rc) {
1511 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1512 return rc;
1513 }
1514 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1515 if (rc) {
1516 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1517 return rc;
1518 }
1519
1520 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1521 tx, rx);
1522
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001523 return rc;
1524}
1525
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001526static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1527{
1528 int i;
1529
1530 for_each_queue(bp, i) {
1531 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001532 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001533
1534 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1535 if (IS_FCOE_IDX(i))
1536 /*
1537			 * Although there are no IP frames expected to arrive on
1538 * this ring we still want to add an
1539 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1540 * overrun attack.
1541 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001542 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001543 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001544 mtu = bp->dev->mtu;
1545 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1546 IP_HEADER_ALIGNMENT_PADDING +
1547 ETH_OVREHEAD +
1548 mtu +
1549 BNX2X_FW_RX_ALIGN_END;
1550		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001551 }
1552}
1553
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001554static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1555{
1556 int i;
1557 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1558 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1559
1560 /*
1561	 * Prepare the initial contents of the indirection table if RSS is
1562 * enabled
1563 */
1564 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1565 for (i = 0; i < sizeof(ind_table); i++)
1566 ind_table[i] =
Ben Hutchings278bc422011-12-15 13:56:49 +00001567 bp->fp->cl_id +
1568 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001569 }
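	/*
	 * Sketch of the resulting table, assuming
	 * ethtool_rxfh_indir_default(i, n) reduces to i % n: entry i maps
	 * to client id (bp->fp->cl_id + i % num_eth_queues), spreading Rx
	 * flows round-robin over the ETH queues.
	 */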
1570
1571 /*
1572 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1573	 * per-port, so if explicit configuration is needed, do it only
1574 * for a PMF.
1575 *
1576 * For 57712 and newer on the other hand it's a per-function
1577 * configuration.
1578 */
1579 return bnx2x_config_rss_pf(bp, ind_table,
1580 bp->port.pmf || !CHIP_IS_E1x(bp));
1581}
1582
1583int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1584{
1585 struct bnx2x_config_rss_params params = {0};
1586 int i;
1587
1588 /* Although RSS is meaningless when there is a single HW queue we
1589 * still need it enabled in order to have HW Rx hash generated.
1590 *
1591 * if (!is_eth_multi(bp))
1592 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1593 */
1594
1595 params.rss_obj = &bp->rss_conf_obj;
1596
1597 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1598
1599 /* RSS mode */
1600 switch (bp->multi_mode) {
1601 case ETH_RSS_MODE_DISABLED:
1602 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1603 break;
1604 case ETH_RSS_MODE_REGULAR:
1605 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1606 break;
1607 case ETH_RSS_MODE_VLAN_PRI:
1608 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1609 break;
1610 case ETH_RSS_MODE_E1HOV_PRI:
1611 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1612 break;
1613 case ETH_RSS_MODE_IP_DSCP:
1614 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1615 break;
1616 default:
1617 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1618 return -EINVAL;
1619 }
1620
1621 /* If RSS is enabled */
1622 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1623 /* RSS configuration */
1624 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1625 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1626 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1627 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1628
1629 /* Hash bits */
1630 params.rss_result_mask = MULTI_MASK;
1631
1632 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1633
1634 if (config_hash) {
1635 /* RSS keys */
1636 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1637 params.rss_key[i] = random32();
1638
1639 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1640 }
1641 }
1642
1643 return bnx2x_config_rss(bp, &params);
1644}
1645
1646static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1647{
1648 struct bnx2x_func_state_params func_params = {0};
1649
1650 /* Prepare parameters for function state transitions */
1651 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1652
1653 func_params.f_obj = &bp->func_obj;
1654 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1655
1656 func_params.params.hw_init.load_phase = load_code;
1657
1658 return bnx2x_func_state_change(bp, &func_params);
1659}
1660
1661/*
1662 * Cleans the objects that have internal lists without sending
1663 * ramrods. Should be run when interrupts are disabled.
1664 */
1665static void bnx2x_squeeze_objects(struct bnx2x *bp)
1666{
1667 int rc;
1668 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1669 struct bnx2x_mcast_ramrod_params rparam = {0};
1670 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1671
1672 /***************** Cleanup MACs' object first *************************/
1673
1674 /* Wait for completion of requested */
1675 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1676 /* Perform a dry cleanup */
1677 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1678
1679 /* Clean ETH primary MAC */
1680 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1681 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1682 &ramrod_flags);
1683 if (rc != 0)
1684 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1685
1686 /* Cleanup UC list */
1687 vlan_mac_flags = 0;
1688 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1689 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1690 &ramrod_flags);
1691 if (rc != 0)
1692 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1693
1694 /***************** Now clean mcast object *****************************/
1695 rparam.mcast_obj = &bp->mcast_obj;
1696 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1697
1698 /* Add a DEL command... */
1699 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1700 if (rc < 0)
1701 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1702 "object: %d\n", rc);
1703
1704 /* ...and wait until all pending commands are cleared */
1705 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1706 while (rc != 0) {
1707 if (rc < 0) {
1708 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1709 rc);
1710 return;
1711 }
1712
1713 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1714 }
1715}
1716
1717#ifndef BNX2X_STOP_ON_ERROR
1718#define LOAD_ERROR_EXIT(bp, label) \
1719 do { \
1720 (bp)->state = BNX2X_STATE_ERROR; \
1721 goto label; \
1722 } while (0)
1723#else
1724#define LOAD_ERROR_EXIT(bp, label) \
1725 do { \
1726 (bp)->state = BNX2X_STATE_ERROR; \
1727 (bp)->panic = 1; \
1728 return -EBUSY; \
1729 } while (0)
1730#endif
1731
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001732/* must be called with rtnl_lock */
1733int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1734{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001735 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001736 u32 load_code;
1737 int i, rc;
1738
1739#ifdef BNX2X_STOP_ON_ERROR
1740 if (unlikely(bp->panic))
1741 return -EPERM;
1742#endif
1743
1744 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1745
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001746 /* Set the initial link reported state to link down */
1747 bnx2x_acquire_phy_lock(bp);
1748 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1749 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1750 &bp->last_reported_link.link_report_flags);
1751 bnx2x_release_phy_lock(bp);
1752
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001753 /* must be called before memory allocation and HW init */
1754 bnx2x_ilt_set_info(bp);
1755
Ariel Elior6383c0b2011-07-14 08:31:57 +00001756 /*
1757 * Zero fastpath structures preserving invariants like napi, which are
1758 * allocated only once, fp index, max_cos, bp pointer.
1759 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001760 */
1761 for_each_queue(bp, i)
1762 bnx2x_bz_fp(bp, i);
1763
Ariel Elior6383c0b2011-07-14 08:31:57 +00001764
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001765 /* Set the receive queues buffer size */
1766 bnx2x_set_rx_buf_size(bp);
1767
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001768 if (bnx2x_alloc_mem(bp))
1769 return -ENOMEM;
1770
1771 /* As long as bnx2x_alloc_mem() may possibly update
1772 * bp->num_queues, bnx2x_set_real_num_queues() should always
1773 * come after it.
1774 */
1775 rc = bnx2x_set_real_num_queues(bp);
1776 if (rc) {
1777 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001778 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001779 }
1780
Ariel Elior6383c0b2011-07-14 08:31:57 +00001781 /* configure multi cos mappings in kernel.
1782	 * this configuration may be overridden by a multi class queue discipline
1783 * or by a dcbx negotiation result.
1784 */
1785 bnx2x_setup_tc(bp->dev, bp->max_cos);
1786
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001787 bnx2x_napi_enable(bp);
1788
Ariel Elior889b9af2012-01-26 06:01:51 +00001789 /* set pf load just before approaching the MCP */
1790 bnx2x_set_pf_load(bp);
1791
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001792 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001793 * Returns the type of LOAD command:
1794 * if it is the first port to be initialized
1795 * common blocks should be initialized, otherwise - not
1796 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001797 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00001798 /* init fw_seq */
1799 bp->fw_seq =
1800 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1801 DRV_MSG_SEQ_NUMBER_MASK);
1802 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1803
1804 /* Get current FW pulse sequence */
1805 bp->fw_drv_pulse_wr_seq =
1806 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1807 DRV_PULSE_SEQ_MASK);
1808 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1809
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001810 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001811 if (!load_code) {
1812 BNX2X_ERR("MCP response failure, aborting\n");
1813 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001814 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001815 }
1816 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1817 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001818 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001819 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00001820 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1821 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1822 /* build FW version dword */
1823 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1824 (BCM_5710_FW_MINOR_VERSION << 8) +
1825 (BCM_5710_FW_REVISION_VERSION << 16) +
1826 (BCM_5710_FW_ENGINEERING_VERSION << 24);
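			/*
			 * Example encoding (version numbers assumed): FW
			 * 7.0.29.0 would yield my_fw = 0x001d0007 - major in
			 * bits 0-7, minor in 8-15, revision in 16-23,
			 * engineering in 24-31.
			 */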
1827
1828 /* read loaded FW from chip */
1829 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1830
1831 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
1832 loaded_fw, my_fw);
1833
1834 /* abort nic load if version mismatch */
1835 if (my_fw != loaded_fw) {
1836 BNX2X_ERR("bnx2x with FW %x already loaded, "
1837 "which mismatches my %x FW. aborting",
1838 loaded_fw, my_fw);
1839 rc = -EBUSY;
1840 LOAD_ERROR_EXIT(bp, load_error2);
1841 }
1842 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001843
1844 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001845 int path = BP_PATH(bp);
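		/*
		 * Without an MCP the driver tracks loads itself:
		 * load_count[path][0] counts all functions on this path and
		 * load_count[path][1 + port] those on this port.  The first
		 * function performs the COMMON init, the first on a port the
		 * PORT init, everyone else only the FUNCTION init.
		 */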
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001846
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001847 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1848 path, load_count[path][0], load_count[path][1],
1849 load_count[path][2]);
1850 load_count[path][0]++;
1851 load_count[path][1 + port]++;
1852 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1853 path, load_count[path][0], load_count[path][1],
1854 load_count[path][2]);
1855 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001856 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001857 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001858 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1859 else
1860 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1861 }
1862
1863 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001864 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001865 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001866 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001867 /*
1868 * We need the barrier to ensure the ordering between the
1869 * writing to bp->port.pmf here and reading it from the
1870 * bnx2x_periodic_task().
1871 */
1872 smp_mb();
1873 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1874 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001875 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001876
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001877 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1878
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001879 /* Init Function state controlling object */
1880 bnx2x__init_func_obj(bp);
1881
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001882 /* Initialize HW */
1883 rc = bnx2x_init_hw(bp, load_code);
1884 if (rc) {
1885 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001886 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001887 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001888 }
1889
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001890 /* Connect to IRQs */
1891 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001892 if (rc) {
1893 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001894 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001895 }
1896
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001897 /* Setup NIC internals and enable interrupts */
1898 bnx2x_nic_init(bp, load_code);
1899
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001900 /* Init per-function objects */
1901 bnx2x_init_bp_objs(bp);
1902
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001903 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1904 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001905 (bp->common.shmem2_base)) {
1906 if (SHMEM2_HAS(bp, dcc_support))
1907 SHMEM2_WR(bp, dcc_support,
1908 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1909 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1910 }
1911
1912 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1913 rc = bnx2x_func_start(bp);
1914 if (rc) {
1915 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00001916 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001917 LOAD_ERROR_EXIT(bp, load_error3);
1918 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001919
1920 /* Send LOAD_DONE command to MCP */
1921 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001922 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001923 if (!load_code) {
1924 BNX2X_ERR("MCP response failure, aborting\n");
1925 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001926 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001927 }
1928 }
1929
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001930 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001931 if (rc) {
1932 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001933 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001934 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001935
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001936#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001937 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001938 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001939#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001940
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001941 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001942 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001943 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001944 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001945 }
1946
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001947 rc = bnx2x_init_rss_pf(bp);
1948 if (rc)
1949 LOAD_ERROR_EXIT(bp, load_error4);
1950
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001951 /* Now when Clients are configured we are ready to work */
1952 bp->state = BNX2X_STATE_OPEN;
1953
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001954 /* Configure a ucast MAC */
1955 rc = bnx2x_set_eth_mac(bp, true);
1956 if (rc)
1957 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001958
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001959 if (bp->pending_max) {
1960 bnx2x_update_max_mf_config(bp, bp->pending_max);
1961 bp->pending_max = 0;
1962 }
1963
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001964 if (bp->port.pmf)
1965 bnx2x_initial_phy_init(bp, load_mode);
1966
1967 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001968
1969 /* Initialize Rx filter. */
1970 netif_addr_lock_bh(bp->dev);
1971 bnx2x_set_rx_mode(bp->dev);
1972 netif_addr_unlock_bh(bp->dev);
1973
1974 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001975 switch (load_mode) {
1976 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001977 /* Tx queue should be only reenabled */
1978 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001979 break;
1980
1981 case LOAD_OPEN:
1982 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001983 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001984 break;
1985
1986 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001987 bp->state = BNX2X_STATE_DIAG;
1988 break;
1989
1990 default:
1991 break;
1992 }
1993
Dmitry Kravkov00253a82011-11-13 04:34:25 +00001994 if (bp->port.pmf)
1995 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
1996 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001997 bnx2x__link_status_update(bp);
1998
1999 /* start the timer */
2000 mod_timer(&bp->timer, jiffies + bp->current_interval);
2001
2002#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00002003 /* re-read iscsi info */
2004 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002005 bnx2x_setup_cnic_irq_info(bp);
2006 if (bp->state == BNX2X_STATE_OPEN)
2007 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2008#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002009
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002010 /* Wait for all pending SP commands to complete */
2011 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2012 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2013 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2014 return -EBUSY;
2015 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002016
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002017 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002018 return 0;
2019
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002020#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002021load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002022#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002023 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002024 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002025#endif
2026load_error3:
2027 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002028
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002029 /* Clean queueable objects */
2030 bnx2x_squeeze_objects(bp);
2031
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002032 /* Free SKBs, SGEs, TPA pool and driver internals */
2033 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002034 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002035 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002036
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002037 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002038 bnx2x_free_irq(bp);
2039load_error2:
2040 if (!BP_NOMCP(bp)) {
2041 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2042 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2043 }
2044
2045 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002046load_error1:
2047 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002048 /* clear pf_load status, as it was already set */
2049 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002050load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002051 bnx2x_free_mem(bp);
2052
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002053 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002054#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002055}
2056
2057/* must be called with rtnl_lock */
2058int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2059{
2060 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002061 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002062
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002063 if ((bp->state == BNX2X_STATE_CLOSED) ||
2064 (bp->state == BNX2X_STATE_ERROR)) {
2065 /* We can get here if the driver has been unloaded
2066 * during parity error recovery and is either waiting for a
2067 * leader to complete or for other functions to unload and
2068 * then ifdown has been issued. In this case we want to
2069 * unload and let other functions to complete a recovery
2070 * process.
2071 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002072 bp->recovery_state = BNX2X_RECOVERY_DONE;
2073 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002074 bnx2x_release_leader_lock(bp);
2075 smp_mb();
2076
2077 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002078
2079 return -EINVAL;
2080 }
2081
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002082 /*
2083 * It's important to set the bp->state to the value different from
2084 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2085 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2086 */
2087 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2088 smp_mb();
2089
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002090 /* Stop Tx */
2091 bnx2x_tx_disable(bp);
2092
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002093#ifdef BCM_CNIC
2094 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2095#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002096
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002097 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002098
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002099 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002100
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002101 /* Set ALWAYS_ALIVE bit in shmem */
2102 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2103
2104 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002105
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002106 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002107 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002108
2109 /* Cleanup the chip if needed */
2110 if (unload_mode != UNLOAD_RECOVERY)
2111 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002112 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002113 /* Send the UNLOAD_REQUEST to the MCP */
2114 bnx2x_send_unload_req(bp, unload_mode);
2115
2116 /*
2117 * Prevent transactions to host from the functions on the
2118 * engine that doesn't reset global blocks in case of global
2119 * attention once gloabl blocks are reset and gates are opened
2120 * (the engine which leader will perform the recovery
2121 * last).
2122 */
2123 if (!CHIP_IS_E1x(bp))
2124 bnx2x_pf_disable(bp);
2125
2126 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002127 bnx2x_netif_stop(bp, 1);
2128
2129 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002130 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002131
2132 /* Report UNLOAD_DONE to MCP */
2133 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002134 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002135
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002136 /*
2137	 * At this stage no more interrupts will arrive, so we may safely clean
2138 * the queueable objects here in case they failed to get cleaned so far.
2139 */
2140 bnx2x_squeeze_objects(bp);
2141
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002142 /* There should be no more pending SP commands at this stage */
2143 bp->sp_state = 0;
2144
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002145 bp->port.pmf = 0;
2146
2147 /* Free SKBs, SGEs, TPA pool and driver internals */
2148 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002149 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002150 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002151
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002152 bnx2x_free_mem(bp);
2153
2154 bp->state = BNX2X_STATE_CLOSED;
2155
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002156 /* Check if there are pending parity attentions. If there are - set
2157 * RECOVERY_IN_PROGRESS.
2158 */
2159 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2160 bnx2x_set_reset_in_progress(bp);
2161
2162 /* Set RESET_IS_GLOBAL if needed */
2163 if (global)
2164 bnx2x_set_reset_global(bp);
2165 }
2166
2167
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002168 /* The last driver must disable a "close the gate" if there is no
2169 * parity attention or "process kill" pending.
2170 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002171 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002172 bnx2x_disable_close_the_gate(bp);
2173
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002174 return 0;
2175}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002176
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002177int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2178{
2179 u16 pmcsr;
2180
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002181 /* If there is no power capability, silently succeed */
2182 if (!bp->pm_cap) {
2183 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2184 return 0;
2185 }
2186
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002187 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2188
2189 switch (state) {
2190 case PCI_D0:
2191 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2192 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2193 PCI_PM_CTRL_PME_STATUS));
2194
2195 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2196 /* delay required during transition out of D3hot */
2197 msleep(20);
2198 break;
2199
2200 case PCI_D3hot:
2201		/* If there are other clients above, don't
2202 shut down the power */
2203 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2204 return 0;
2205 /* Don't shut down the power for emulation and FPGA */
2206 if (CHIP_REV_IS_SLOW(bp))
2207 return 0;
2208
2209 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2210 pmcsr |= 3;
2211
2212 if (bp->wol)
2213 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2214
2215 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2216 pmcsr);
2217
2218 /* No more memory access after this point until
2219 * device is brought back to D0.
2220 */
2221 break;
2222
2223 default:
2224 return -EINVAL;
2225 }
2226 return 0;
2227}
2228
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002229/*
2230 * net_device service functions
2231 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002232int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002233{
2234 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002235 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002236 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2237 napi);
2238 struct bnx2x *bp = fp->bp;
2239
2240 while (1) {
2241#ifdef BNX2X_STOP_ON_ERROR
2242 if (unlikely(bp->panic)) {
2243 napi_complete(napi);
2244 return 0;
2245 }
2246#endif
2247
Ariel Elior6383c0b2011-07-14 08:31:57 +00002248 for_each_cos_in_tx_queue(fp, cos)
2249 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2250 bnx2x_tx_int(bp, &fp->txdata[cos]);
2251
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002252
2253 if (bnx2x_has_rx_work(fp)) {
2254 work_done += bnx2x_rx_int(fp, budget - work_done);
2255
2256 /* must not complete if we consumed full budget */
2257 if (work_done >= budget)
2258 break;
2259 }
2260
2261 /* Fall out from the NAPI loop if needed */
2262 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002263#ifdef BCM_CNIC
2264 /* No need to update SB for FCoE L2 ring as long as
2265 * it's connected to the default SB and the SB
2266 * has been updated when NAPI was scheduled.
2267 */
2268 if (IS_FCOE_FP(fp)) {
2269 napi_complete(napi);
2270 break;
2271 }
2272#endif
2273
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002274 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002275 /* bnx2x_has_rx_work() reads the status block,
2276 * thus we need to ensure that status block indices
2277 * have been actually read (bnx2x_update_fpsb_idx)
2278 * prior to this check (bnx2x_has_rx_work) so that
2279 * we won't write the "newer" value of the status block
2280 * to IGU (if there was a DMA right after
2281 * bnx2x_has_rx_work and if there is no rmb, the memory
2282 * reading (bnx2x_update_fpsb_idx) may be postponed
2283 * to right before bnx2x_ack_sb). In this case there
2284 * will never be another interrupt until there is
2285 * another update of the status block, while there
2286 * is still unhandled work.
2287 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002288 rmb();
2289
2290 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2291 napi_complete(napi);
2292 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002293 DP(NETIF_MSG_HW,
2294 "Update index to %d\n", fp->fp_hc_idx);
2295 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2296 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002297 IGU_INT_ENABLE, 1);
2298 break;
2299 }
2300 }
2301 }
2302
2303 return work_done;
2304}
2305
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002306/* we split the first BD into headers and data BDs
2307 * to ease the pain of our fellow microcode engineers.
2308 * We use one mapping for both BDs.
2309 * So far this has only been observed to happen
2310 * in Other Operating Systems(TM)
2311 */
2312static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002313 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002314 struct sw_tx_bd *tx_buf,
2315 struct eth_tx_start_bd **tx_bd, u16 hlen,
2316 u16 bd_prod, int nbd)
2317{
2318 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2319 struct eth_tx_bd *d_tx_bd;
2320 dma_addr_t mapping;
2321 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2322
2323 /* first fix first BD */
2324 h_tx_bd->nbd = cpu_to_le16(nbd);
2325 h_tx_bd->nbytes = cpu_to_le16(hlen);
2326
2327 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2328 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2329 h_tx_bd->addr_lo, h_tx_bd->nbd);
2330
2331 /* now get a new data BD
2332 * (after the pbd) and fill it */
2333 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002334 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002335
2336 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2337 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2338
2339 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2340 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2341 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2342
2343 /* this marks the BD as one that has no individual mapping */
2344 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2345
2346 DP(NETIF_MSG_TX_QUEUED,
2347 "TSO split data size is %d (%x:%x)\n",
2348 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2349
2350 /* update tx_bd */
2351 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2352
2353 return bd_prod;
2354}
2355
2356static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2357{
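	/*
	 * Adjust a checksum whose coverage starts "fix" bytes away from the
	 * transport header: remove the partial sum of the fix bytes before
	 * it (fix > 0) or add the partial sum of the -fix bytes at it
	 * (fix < 0), then byte-swap for the hardware.
	 */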
2358 if (fix > 0)
2359 csum = (u16) ~csum_fold(csum_sub(csum,
2360 csum_partial(t_header - fix, fix, 0)));
2361
2362 else if (fix < 0)
2363 csum = (u16) ~csum_fold(csum_add(csum,
2364 csum_partial(t_header, -fix, 0)));
2365
2366 return swab16(csum);
2367}
2368
2369static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2370{
2371 u32 rc;
2372
2373 if (skb->ip_summed != CHECKSUM_PARTIAL)
2374 rc = XMIT_PLAIN;
2375
2376 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002377 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002378 rc = XMIT_CSUM_V6;
2379 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2380 rc |= XMIT_CSUM_TCP;
2381
2382 } else {
2383 rc = XMIT_CSUM_V4;
2384 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2385 rc |= XMIT_CSUM_TCP;
2386 }
2387 }
2388
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002389 if (skb_is_gso_v6(skb))
2390 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2391 else if (skb_is_gso(skb))
2392 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002393
2394 return rc;
2395}
2396
2397#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2398/* check if packet requires linearization (packet is too fragmented)
2399 no need to check fragmentation if page size > 8K (there will be no
2400 violation to FW restrictions) */
2401static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2402 u32 xmit_type)
2403{
2404 int to_copy = 0;
2405 int hlen = 0;
2406 int first_bd_sz = 0;
2407
2408 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2409 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2410
2411 if (xmit_type & XMIT_GSO) {
2412 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2413 /* Check if LSO packet needs to be copied:
2414 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2415 int wnd_size = MAX_FETCH_BD - 3;
2416 /* Number of windows to check */
2417 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2418 int wnd_idx = 0;
2419 int frag_idx = 0;
2420 u32 wnd_sum = 0;
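			/*
			 * The loops below slide a window of wnd_size
			 * descriptors over the fragments and request
			 * linearization if any window carries less than one
			 * full MSS - the FW restriction this check is
			 * assumed to guard against.
			 */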
2421
2422 /* Headers length */
2423 hlen = (int)(skb_transport_header(skb) - skb->data) +
2424 tcp_hdrlen(skb);
2425
2426			/* Amount of data (w/o headers) on linear part of SKB */
2427 first_bd_sz = skb_headlen(skb) - hlen;
2428
2429 wnd_sum = first_bd_sz;
2430
2431 /* Calculate the first sum - it's special */
2432 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2433 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002434 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002435
2436 /* If there was data on linear skb data - check it */
2437 if (first_bd_sz > 0) {
2438 if (unlikely(wnd_sum < lso_mss)) {
2439 to_copy = 1;
2440 goto exit_lbl;
2441 }
2442
2443 wnd_sum -= first_bd_sz;
2444 }
2445
2446 /* Others are easier: run through the frag list and
2447 check all windows */
2448 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2449 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002450 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002451
2452 if (unlikely(wnd_sum < lso_mss)) {
2453 to_copy = 1;
2454 break;
2455 }
2456 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002457 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002458 }
2459 } else {
2460			/* a non-LSO packet that is too fragmented should
2461			   always be linearized */
2462 to_copy = 1;
2463 }
2464 }
2465
2466exit_lbl:
2467 if (unlikely(to_copy))
2468 DP(NETIF_MSG_TX_QUEUED,
2469 "Linearization IS REQUIRED for %s packet. "
2470 "num_frags %d hlen %d first_bd_sz %d\n",
2471 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2472 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2473
2474 return to_copy;
2475}
2476#endif
2477
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002478static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2479 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002480{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002481 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2482 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2483 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002484 if ((xmit_type & XMIT_GSO_V6) &&
2485 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002486 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002487}
2488
2489/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002490 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002491 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002492 * @skb: packet skb
2493 * @pbd: parse BD
2494 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002495 */
2496static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2497 struct eth_tx_parse_bd_e1x *pbd,
2498 u32 xmit_type)
2499{
2500 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2501 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2502 pbd->tcp_flags = pbd_tcp_flags(skb);
2503
2504 if (xmit_type & XMIT_GSO_V4) {
2505 pbd->ip_id = swab16(ip_hdr(skb)->id);
2506 pbd->tcp_pseudo_csum =
2507 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2508 ip_hdr(skb)->daddr,
2509 0, IPPROTO_TCP, 0));
2510
2511 } else
2512 pbd->tcp_pseudo_csum =
2513 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2514 &ipv6_hdr(skb)->daddr,
2515 0, IPPROTO_TCP, 0));
2516
2517 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
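	/*
	 * The pseudo-header checksums above are intentionally computed with
	 * a zero length; the flag just set is assumed to tell the firmware
	 * to fold the per-segment length in on its own.
	 */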
2518}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002519
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002520/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002521 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002522 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002523 * @bp: driver handle
2524 * @skb: packet skb
2525 * @parsing_data: data to be updated
2526 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002527 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002528 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002529 */
2530static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002531 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002532{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002533 *parsing_data |=
2534 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2535 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2536 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002537
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002538 if (xmit_type & XMIT_CSUM_TCP) {
2539 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2540 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2541 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002542
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002543 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2544 } else
2545 /* We support checksum offload for TCP and UDP only.
2546 * No need to pass the UDP header length - it's a constant.
2547 */
2548 return skb_transport_header(skb) +
2549 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002550}
2551
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002552static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2553 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2554{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002555 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2556
2557 if (xmit_type & XMIT_CSUM_V4)
2558 tx_start_bd->bd_flags.as_bitfield |=
2559 ETH_TX_BD_FLAGS_IP_CSUM;
2560 else
2561 tx_start_bd->bd_flags.as_bitfield |=
2562 ETH_TX_BD_FLAGS_IPV6;
2563
2564 if (!(xmit_type & XMIT_CSUM_TCP))
2565 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002566}
2567
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002568/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002569 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002570 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002571 * @bp: driver handle
2572 * @skb: packet skb
2573 * @pbd: parse BD to be updated
2574 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002575 */
2576static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2577 struct eth_tx_parse_bd_e1x *pbd,
2578 u32 xmit_type)
2579{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002580 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
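	/*
	 * Lengths in the parse BD are in 16-bit words, hence the ">> 1"
	 * here and the "hlen * 2" conversion back to bytes before
	 * returning.
	 */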
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002581
2582 /* for now NS flag is not used in Linux */
2583 pbd->global_data =
2584 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2585 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2586
2587 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002588 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002589
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002590 hlen += pbd->ip_hlen_w;
2591
2592 /* We support checksum offload for TCP and UDP only */
2593 if (xmit_type & XMIT_CSUM_TCP)
2594 hlen += tcp_hdrlen(skb) / 2;
2595 else
2596 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002597
2598 pbd->total_hlen_w = cpu_to_le16(hlen);
2599 hlen = hlen*2;
2600
2601 if (xmit_type & XMIT_CSUM_TCP) {
2602 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2603
2604 } else {
2605 s8 fix = SKB_CS_OFF(skb); /* signed! */
2606
2607 DP(NETIF_MSG_TX_QUEUED,
2608 "hlen %d fix %d csum before fix %x\n",
2609 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2610
2611 /* HW bug: fixup the CSUM */
2612 pbd->tcp_pseudo_csum =
2613 bnx2x_csum_fix(skb_transport_header(skb),
2614 SKB_CS(skb), fix);
2615
2616 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2617 pbd->tcp_pseudo_csum);
2618 }
2619
2620 return hlen;
2621}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002622
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002623/* called with netif_tx_lock
2624 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2625 * netif_wake_queue()
2626 */
2627netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2628{
2629 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002630
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002631 struct bnx2x_fastpath *fp;
2632 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002633 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002634 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002635 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002636 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002637 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002638 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002639 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002640 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002641 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002642 dma_addr_t mapping;
2643 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2644 int i;
2645 u8 hlen = 0;
2646 __le16 pkt_size = 0;
2647 struct ethhdr *eth;
2648 u8 mac_type = UNICAST_ADDRESS;
2649
2650#ifdef BNX2X_STOP_ON_ERROR
2651 if (unlikely(bp->panic))
2652 return NETDEV_TX_BUSY;
2653#endif
2654
Ariel Elior6383c0b2011-07-14 08:31:57 +00002655 txq_index = skb_get_queue_mapping(skb);
2656 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002657
Ariel Elior6383c0b2011-07-14 08:31:57 +00002658 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2659
2660 /* decode the fastpath index and the cos index from the txq */
2661 fp_index = TXQ_TO_FP(txq_index);
2662 txdata_index = TXQ_TO_COS(txq_index);
2663
2664#ifdef BCM_CNIC
2665 /*
2666 * Override the above for the FCoE queue:
2667 * - FCoE fp entry is right after the ETH entries.
2668 * - FCoE L2 queue uses bp->txdata[0] only.
2669 */
2670 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2671 bnx2x_fcoe_tx(bp, txq_index)))) {
2672 fp_index = FCOE_IDX;
2673 txdata_index = 0;
2674 }
2675#endif
2676
2677 /* enable this debug print to view the transmission queue being used
Joe Perches94f05b02011-08-14 12:16:20 +00002678 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002679 txq_index, fp_index, txdata_index); */
2680
2681 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002682 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002683 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002684
Ariel Elior6383c0b2011-07-14 08:31:57 +00002685	/* enable this debug print to view the transmission details
2686 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
Joe Perches94f05b02011-08-14 12:16:20 +00002687 " tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002688 txdata->cid, fp_index, txdata_index, txdata, fp); */
2689
2690 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2691 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002692 fp->eth_q_stats.driver_xoff++;
2693 netif_tx_stop_queue(txq);
2694 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2695 return NETDEV_TX_BUSY;
2696 }
2697
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002698 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2699 "protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002700 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002701 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2702
2703 eth = (struct ethhdr *)skb->data;
2704
2705 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2706 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2707 if (is_broadcast_ether_addr(eth->h_dest))
2708 mac_type = BROADCAST_ADDRESS;
2709 else
2710 mac_type = MULTICAST_ADDRESS;
2711 }
2712
2713#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2714 /* First, check if we need to linearize the skb (due to FW
2715 restrictions). No need to check fragmentation if page size > 8K
2716 (there will be no violation to FW restrictions) */
2717 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2718 /* Statistics of linearization */
2719 bp->lin_cnt++;
2720 if (skb_linearize(skb) != 0) {
2721 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2722 "silently dropping this SKB\n");
2723 dev_kfree_skb_any(skb);
2724 return NETDEV_TX_OK;
2725 }
2726 }
2727#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002728 /* Map skb linear data for DMA */
2729 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2730 skb_headlen(skb), DMA_TO_DEVICE);
2731 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2732 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2733 "silently dropping this SKB\n");
2734 dev_kfree_skb_any(skb);
2735 return NETDEV_TX_OK;
2736 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002737 /*
2738 Please read carefully. First we use one BD which we mark as start,
2739 then we have a parsing info BD (used for TSO or xsum),
2740 and only then we have the rest of the TSO BDs.
2741 (don't forget to mark the last one as last,
2742 and to unmap only AFTER you write to the BD ...)
2743	 And above all, all PBD sizes are in words - NOT DWORDS!
2744 */
2745
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002746 /* get current pkt produced now - advance it just before sending packet
2747 * since mapping of pages may fail and cause packet to be dropped
2748 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002749 pkt_prod = txdata->tx_pkt_prod;
2750 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002751
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002752 /* get a tx_buf and first BD
2753 * tx_start_bd may be changed during SPLIT,
2754 * but first_bd will always stay first
2755 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002756 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2757 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002758 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002759
2760 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002761 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2762 mac_type);
2763
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002764 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002765 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002766
2767 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002768 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002769 tx_buf->skb = skb;
2770 tx_buf->flags = 0;
2771
2772 DP(NETIF_MSG_TX_QUEUED,
2773 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002774 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002775
Jesse Grosseab6d182010-10-20 13:56:03 +00002776 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002777 tx_start_bd->vlan_or_ethertype =
2778 cpu_to_le16(vlan_tx_tag_get(skb));
2779 tx_start_bd->bd_flags.as_bitfield |=
2780 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002781 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002782 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002783
2784 /* turn on parsing and get a BD */
2785 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002786
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002787 if (xmit_type & XMIT_CSUM)
2788 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002789
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002790 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002791 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002792 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2793 /* Set PBD in checksum offload case */
2794 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002795 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2796 &pbd_e2_parsing_data,
2797 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002798 if (IS_MF_SI(bp)) {
2799 /*
2800 * fill in the MAC addresses in the PBD - for local
2801 * switching
2802 */
2803 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2804 &pbd_e2->src_mac_addr_mid,
2805 &pbd_e2->src_mac_addr_lo,
2806 eth->h_source);
2807 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2808 &pbd_e2->dst_mac_addr_mid,
2809 &pbd_e2->dst_mac_addr_lo,
2810 eth->h_dest);
2811 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002812 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002813 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002814 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2815 /* Set PBD in checksum offload case */
2816 if (xmit_type & XMIT_CSUM)
2817 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002818
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002819 }
2820
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002821 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002822 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2823 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002824 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002825 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2826 pkt_size = tx_start_bd->nbytes;
2827
2828 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2829 " nbytes %d flags %x vlan %x\n",
2830 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2831 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002832 tx_start_bd->bd_flags.as_bitfield,
2833 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002834
2835 if (xmit_type & XMIT_GSO) {
2836
2837 DP(NETIF_MSG_TX_QUEUED,
2838 "TSO packet len %d hlen %d total len %d tso size %d\n",
2839 skb->len, hlen, skb_headlen(skb),
2840 skb_shinfo(skb)->gso_size);
2841
2842 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2843
2844 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00002845 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2846 &tx_start_bd, hlen,
2847 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002848 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002849 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2850 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002851 else
2852 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002853 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002854
2855 /* Set the PBD's parsing_data field if not zero
2856 * (for the chips newer than 57711).
2857 */
2858 if (pbd_e2_parsing_data)
2859 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2860
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002861 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2862
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002863 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2865 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2866
Eric Dumazet9e903e02011-10-18 21:00:24 +00002867 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2868 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002869 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00002870 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002871
2872 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2873 "dropping packet...\n");
2874
2875 /* we need to unmap all buffers already mapped
2876 * for this SKB;
2877 * first_bd->nbd needs to be properly updated
2878 * before the call to bnx2x_free_tx_pkt
2879 */
2880 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002881 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00002882 TX_BD(txdata->tx_pkt_prod),
2883 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002884 return NETDEV_TX_OK;
2885 }
2886
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002887 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002888 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002889 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00002890 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002891
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002892 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2893 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00002894 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2895 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002896 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002897
2898 DP(NETIF_MSG_TX_QUEUED,
2899 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2900 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2901 le16_to_cpu(tx_data_bd->nbytes));
2902 }
2903
2904 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2905
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002906 /* update with actual num BDs */
2907 first_bd->nbd = cpu_to_le16(nbd);
2908
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002909 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2910
2911 /* now send a tx doorbell, counting the next-page BD
2912 * if the packet's BD chain contains or ends on one
2913 */
2914 if (TX_BD_POFF(bd_prod) < nbd)
2915 nbd++;
2916
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002917 /* total_pkt_bytes should be set on the first data BD if
2918 * it's not an LSO packet and there is more than one
2919 * data BD. In this case pkt_size is limited by an MTU value.
2920 * However we prefer to set it for an LSO packet (while we don't
2921 * have to) in order to save some CPU cycles in the non-LSO
2922 * case, where those cycles matter much more.
2923 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002924 if (total_pkt_bd != NULL)
2925 total_pkt_bd->total_pkt_bytes = pkt_size;
2926
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002927 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002928 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002929 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002930 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002931 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2932 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2933 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2934 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002935 if (pbd_e2)
2936 DP(NETIF_MSG_TX_QUEUED,
2937 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2938 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2939 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2940 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2941 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002942 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2943
Tom Herbert2df1a702011-11-28 16:33:37 +00002944 netdev_tx_sent_queue(txq, skb->len);
2945
Ariel Elior6383c0b2011-07-14 08:31:57 +00002946 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002947 /*
2948 * Make sure that the BD data is updated before updating the producer
2949 * since FW might read the BD right after the producer is updated.
2950 * This is only applicable for weak-ordered memory model archs such
2951 * as IA-64. The following barrier is also mandatory since FW will
2952 * assume packets must have BDs.
2953 */
2954 wmb();
2955
Ariel Elior6383c0b2011-07-14 08:31:57 +00002956 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002957 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002958
Ariel Elior6383c0b2011-07-14 08:31:57 +00002959 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002960
2961 mmiowb();
2962
Ariel Elior6383c0b2011-07-14 08:31:57 +00002963 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002964
Ariel Elior6383c0b2011-07-14 08:31:57 +00002965 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002966 netif_tx_stop_queue(txq);
2967
2968 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2969 * ordering of set_bit() in netif_tx_stop_queue() and read of
2970 * fp->bd_tx_cons */
2971 smp_mb();
2972
2973 fp->eth_q_stats.driver_xoff++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002974 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002975 netif_tx_wake_queue(txq);
2976 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002977 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002978
2979 return NETDEV_TX_OK;
2980}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002981
Ariel Elior6383c0b2011-07-14 08:31:57 +00002982/**
2983 * bnx2x_setup_tc - routine to configure net_device for multi tc
2984 *
2985 * @netdev: net device to configure
2986 * @tc: number of traffic classes to enable
2987 *
2988 * callback connected to the ndo_setup_tc function pointer
2989 */
2990int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2991{
2992 int cos, prio, count, offset;
2993 struct bnx2x *bp = netdev_priv(dev);
2994
2995 /* setup tc must be called under rtnl lock */
2996 ASSERT_RTNL();
2997
2998 /* no traffic classes requested. aborting */
2999 if (!num_tc) {
3000 netdev_reset_tc(dev);
3001 return 0;
3002 }
3003
3004 /* requested to support too many traffic classes */
3005 if (num_tc > bp->max_cos) {
3006 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
Joe Perches94f05b02011-08-14 12:16:20 +00003007 " requested: %d. max supported is %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003008 num_tc, bp->max_cos);
3009 return -EINVAL;
3010 }
3011
3012 /* declare amount of supported traffic classes */
3013 if (netdev_set_num_tc(dev, num_tc)) {
Joe Perches94f05b02011-08-14 12:16:20 +00003014 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003015 num_tc);
3016 return -EINVAL;
3017 }
3018
3019 /* configure priority to traffic class mapping */
3020 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3021 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Joe Perches94f05b02011-08-14 12:16:20 +00003022 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003023 prio, bp->prio_to_cos[prio]);
3024 }
3025
3026
3027 /* Use this configuration to diffrentiate tc0 from other COSes
3028 This can be used for ets or pfc, and save the effort of setting
3029 up a multio class queue disc or negotiating DCBX with a switch
3030 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003031 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003032 for (prio = 1; prio < 16; prio++) {
3033 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003034 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003035 } */
3036
3037 /* configure traffic class to transmission queue mapping */
3038 for (cos = 0; cos < bp->max_cos; cos++) {
3039 count = BNX2X_NUM_ETH_QUEUES(bp);
3040 offset = cos * MAX_TXQS_PER_COS;
3041 netdev_set_tc_queue(dev, cos, count, offset);
Joe Perches94f05b02011-08-14 12:16:20 +00003042 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003043 cos, offset, count);
3044 }
3045
3046 return 0;
3047}
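
/* Usage sketch (illustrative only, not part of this file): the routine above
 * is intended to back the ndo_setup_tc callback, e.g.
 *
 *	.ndo_setup_tc	= bnx2x_setup_tc,
 *
 * in the driver's net_device_ops, so that configuring an mqprio qdisc
 * ("tc qdisc add dev ethX root mqprio num_tc <n> ...") maps priorities to
 * the COS-based transmit queues set up here.
 */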
3048
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003049/* called with rtnl_lock */
3050int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3051{
3052 struct sockaddr *addr = p;
3053 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003054 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003055
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003056 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003057 return -EINVAL;
3058
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003059#ifdef BCM_CNIC
3060 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
3061 return -EINVAL;
3062#endif
3063
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003064 if (netif_running(dev)) {
3065 rc = bnx2x_set_eth_mac(bp, false);
3066 if (rc)
3067 return rc;
3068 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003069
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003070 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003071 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3072
3073 if (netif_running(dev))
3074 rc = bnx2x_set_eth_mac(bp, true);
3075
3076 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003077}
3078
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003079static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3080{
3081 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3082 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003083 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003084
3085 /* Common */
3086#ifdef BCM_CNIC
3087 if (IS_FCOE_IDX(fp_index)) {
3088 memset(sb, 0, sizeof(union host_hc_status_block));
3089 fp->status_blk_mapping = 0;
3090
3091 } else {
3092#endif
3093 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003094 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003095 BNX2X_PCI_FREE(sb->e2_sb,
3096 bnx2x_fp(bp, fp_index,
3097 status_blk_mapping),
3098 sizeof(struct host_hc_status_block_e2));
3099 else
3100 BNX2X_PCI_FREE(sb->e1x_sb,
3101 bnx2x_fp(bp, fp_index,
3102 status_blk_mapping),
3103 sizeof(struct host_hc_status_block_e1x));
3104#ifdef BCM_CNIC
3105 }
3106#endif
3107 /* Rx */
3108 if (!skip_rx_queue(bp, fp_index)) {
3109 bnx2x_free_rx_bds(fp);
3110
3111 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3112 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3113 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3114 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3115 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3116
3117 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3118 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3119 sizeof(struct eth_fast_path_rx_cqe) *
3120 NUM_RCQ_BD);
3121
3122 /* SGE ring */
3123 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3124 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3125 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3126 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3127 }
3128
3129 /* Tx */
3130 if (!skip_tx_queue(bp, fp_index)) {
3131 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003132 for_each_cos_in_tx_queue(fp, cos) {
3133 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3134
3135 DP(BNX2X_MSG_SP,
Joe Perches94f05b02011-08-14 12:16:20 +00003136 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003137 fp_index, cos, txdata->cid);
3138
3139 BNX2X_FREE(txdata->tx_buf_ring);
3140 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3141 txdata->tx_desc_mapping,
3142 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3143 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003144 }
3145 /* end of fastpath */
3146}
3147
3148void bnx2x_free_fp_mem(struct bnx2x *bp)
3149{
3150 int i;
3151 for_each_queue(bp, i)
3152 bnx2x_free_fp_mem_at(bp, i);
3153}
3154
3155static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3156{
3157 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003158 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003159 bnx2x_fp(bp, index, sb_index_values) =
3160 (__le16 *)status_blk.e2_sb->sb.index_values;
3161 bnx2x_fp(bp, index, sb_running_index) =
3162 (__le16 *)status_blk.e2_sb->sb.running_index;
3163 } else {
3164 bnx2x_fp(bp, index, sb_index_values) =
3165 (__le16 *)status_blk.e1x_sb->sb.index_values;
3166 bnx2x_fp(bp, index, sb_running_index) =
3167 (__le16 *)status_blk.e1x_sb->sb.running_index;
3168 }
3169}
3170
3171static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3172{
3173 union host_hc_status_block *sb;
3174 struct bnx2x_fastpath *fp = &bp->fp[index];
3175 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003176 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003177 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003178
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003179#ifdef BCM_CNIC
Dmitry Kravkov1fdf1552012-01-23 07:31:54 +00003180 if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003181 rx_ring_size = MIN_RX_SIZE_NONTPA;
3182 bp->rx_ring_size = rx_ring_size;
3183 } else
3184#endif
David S. Miller8decf862011-09-22 03:23:13 -04003185 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003186 u32 cfg = SHMEM_RD(bp,
3187 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003188
David S. Miller8decf862011-09-22 03:23:13 -04003189 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3190
Mintz Yuvald760fc32012-02-15 02:10:28 +00003191 /* Decrease ring size for 1G functions */
3192 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3193 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3194 rx_ring_size /= 10;
3195
David S. Miller8decf862011-09-22 03:23:13 -04003196 /* allocate at least number of buffers required by FW */
3197 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3198 MIN_RX_SIZE_TPA, rx_ring_size);
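		/* Sizing summary (descriptive note): start from an equal share
		 * of MAX_RX_AVAIL per RX queue, cut it by 10x for 1G (SGMII)
		 * functions, then never go below the FW-required minimum for
		 * the TPA/non-TPA case.
		 */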
3199
3200 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003201 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003202 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003203
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003204 /* Common */
3205 sb = &bnx2x_fp(bp, index, status_blk);
3206#ifdef BCM_CNIC
3207 if (!IS_FCOE_IDX(index)) {
3208#endif
3209 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003210 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003211 BNX2X_PCI_ALLOC(sb->e2_sb,
3212 &bnx2x_fp(bp, index, status_blk_mapping),
3213 sizeof(struct host_hc_status_block_e2));
3214 else
3215 BNX2X_PCI_ALLOC(sb->e1x_sb,
3216 &bnx2x_fp(bp, index, status_blk_mapping),
3217 sizeof(struct host_hc_status_block_e1x));
3218#ifdef BCM_CNIC
3219 }
3220#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003221
3222 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3223 * set shortcuts for it.
3224 */
3225 if (!IS_FCOE_IDX(index))
3226 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003227
3228 /* Tx */
3229 if (!skip_tx_queue(bp, index)) {
3230 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003231 for_each_cos_in_tx_queue(fp, cos) {
3232 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3233
3234 DP(BNX2X_MSG_SP, "allocating tx memory of "
Joe Perches94f05b02011-08-14 12:16:20 +00003235 "fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003236 index, cos);
3237
3238 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003239 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003240 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3241 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003242 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003243 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003244 }
3245
3246 /* Rx */
3247 if (!skip_rx_queue(bp, index)) {
3248 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3249 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3250 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3251 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3252 &bnx2x_fp(bp, index, rx_desc_mapping),
3253 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3254
3255 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3256 &bnx2x_fp(bp, index, rx_comp_mapping),
3257 sizeof(struct eth_fast_path_rx_cqe) *
3258 NUM_RCQ_BD);
3259
3260 /* SGE ring */
3261 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3262 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3263 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3264 &bnx2x_fp(bp, index, rx_sge_mapping),
3265 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3266 /* RX BD ring */
3267 bnx2x_set_next_page_rx_bd(fp);
3268
3269 /* CQ ring */
3270 bnx2x_set_next_page_rx_cq(fp);
3271
3272 /* BDs */
3273 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3274 if (ring_size < rx_ring_size)
3275 goto alloc_mem_err;
3276 }
3277
3278 return 0;
3279
3280/* handles low memory cases */
3281alloc_mem_err:
3282 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3283 index, ring_size);
3284 /* FW will drop all packets if the queue is not big enough;
3285 * in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003286 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003287 */
3288 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003289 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003290 /* release memory allocated for this queue */
3291 bnx2x_free_fp_mem_at(bp, index);
3292 return -ENOMEM;
3293 }
3294 return 0;
3295}
3296
3297int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3298{
3299 int i;
3300
3301 /**
3302 * 1. Allocate FP for leading - fatal if error
3303 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003304 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3305 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003306 */
3307
3308 /* leading */
3309 if (bnx2x_alloc_fp_mem_at(bp, 0))
3310 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003311
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003312#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003313 if (!NO_FCOE(bp))
3314 /* FCoE */
3315 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3316 /* we will fail load process instead of mark
3317 * NO_FCOE_FLAG
3318 */
3319 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003320#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003321
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003322 /* RSS */
3323 for_each_nondefault_eth_queue(bp, i)
3324 if (bnx2x_alloc_fp_mem_at(bp, i))
3325 break;
3326
3327 /* handle memory failures */
3328 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3329 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3330
3331 WARN_ON(delta < 0);
3332#ifdef BCM_CNIC
3333 /**
3334 * move non eth FPs next to last eth FP
3335 * must be done in that order
3336 * FCOE_IDX < FWD_IDX < OOO_IDX
3337 */
3338
Ariel Elior6383c0b2011-07-14 08:31:57 +00003339 /* move FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003340 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
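		/* Descriptive note (an assumption about intent): FCOE_IDX
		 * follows the number of ETH queues, so once bp->num_queues is
		 * reduced below, the moved fastpath is again found at FCOE_IDX.
		 */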
3341#endif
3342 bp->num_queues -= delta;
3343 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3344 bp->num_queues + delta, bp->num_queues);
3345 }
3346
3347 return 0;
3348}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003349
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003350void bnx2x_free_mem_bp(struct bnx2x *bp)
3351{
3352 kfree(bp->fp);
3353 kfree(bp->msix_table);
3354 kfree(bp->ilt);
3355}
3356
3357int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3358{
3359 struct bnx2x_fastpath *fp;
3360 struct msix_entry *tbl;
3361 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003362 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003363
Ariel Elior6383c0b2011-07-14 08:31:57 +00003364 /*
3365 * The biggest MSI-X table we might need is the maximum number of fast
3366 * path IGU SBs plus default SB (for PF).
3367 */
3368 msix_table_size = bp->igu_sb_cnt + 1;
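	/* e.g. with 16 fastpath IGU status blocks this yields a 17-entry
	 * table: 16 fastpath vectors plus the default SB (illustrative figure)
	 */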
3369
3370 /* fp array: RSS plus CNIC related L2 queues */
Thomas Meyer01e23742011-11-29 11:08:00 +00003371 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003372 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003373 if (!fp)
3374 goto alloc_err;
3375 bp->fp = fp;
3376
3377 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003378 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003379 if (!tbl)
3380 goto alloc_err;
3381 bp->msix_table = tbl;
3382
3383 /* ilt */
3384 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3385 if (!ilt)
3386 goto alloc_err;
3387 bp->ilt = ilt;
3388
3389 return 0;
3390alloc_err:
3391 bnx2x_free_mem_bp(bp);
3392 return -ENOMEM;
3393
3394}
3395
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003396int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003397{
3398 struct bnx2x *bp = netdev_priv(dev);
3399
3400 if (unlikely(!netif_running(dev)))
3401 return 0;
3402
3403 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3404 return bnx2x_nic_load(bp, LOAD_NORMAL);
3405}
3406
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003407int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3408{
3409 u32 sel_phy_idx = 0;
3410 if (bp->link_params.num_phys <= 1)
3411 return INT_PHY;
3412
3413 if (bp->link_vars.link_up) {
3414 sel_phy_idx = EXT_PHY1;
3415 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3416 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3417 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3418 sel_phy_idx = EXT_PHY2;
3419 } else {
3420
3421 switch (bnx2x_phy_selection(&bp->link_params)) {
3422 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3423 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3424 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3425 sel_phy_idx = EXT_PHY1;
3426 break;
3427 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3428 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3429 sel_phy_idx = EXT_PHY2;
3430 break;
3431 }
3432 }
3433
3434 return sel_phy_idx;
3435
3436}
3437int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3438{
3439 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3440 /*
3441 * The selected active PHY index always refers to the post-swap order
3442 * (in case PHY swapping is enabled), so when swapping is enabled we need
3443 * to reverse the configuration
3444 */
3445
3446 if (bp->link_params.multi_phy_config &
3447 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3448 if (sel_phy_idx == EXT_PHY1)
3449 sel_phy_idx = EXT_PHY2;
3450 else if (sel_phy_idx == EXT_PHY2)
3451 sel_phy_idx = EXT_PHY1;
3452 }
3453 return LINK_CONFIG_IDX(sel_phy_idx);
3454}
3455
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003456#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3457int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3458{
3459 struct bnx2x *bp = netdev_priv(dev);
3460 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3461
3462 switch (type) {
3463 case NETDEV_FCOE_WWNN:
3464 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3465 cp->fcoe_wwn_node_name_lo);
3466 break;
3467 case NETDEV_FCOE_WWPN:
3468 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3469 cp->fcoe_wwn_port_name_lo);
3470 break;
3471 default:
3472 return -EINVAL;
3473 }
3474
3475 return 0;
3476}
3477#endif
3478
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003479/* called with rtnl_lock */
3480int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3481{
3482 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003483
3484 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00003485 netdev_err(dev, "Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003486 return -EAGAIN;
3487 }
3488
3489 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3490 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3491 return -EINVAL;
3492
3493 /* This does not race with packet allocation
3494 * because the actual alloc size is
3495 * only updated as part of load
3496 */
3497 dev->mtu = new_mtu;
3498
Dmitry Kravkovfe603b42012-02-20 09:59:11 +00003499 bp->gro_check = bnx2x_need_gro_check(new_mtu);
3500
Michał Mirosław66371c42011-04-12 09:38:23 +00003501 return bnx2x_reload_if_running(dev);
3502}
3503
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003504netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003505 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003506{
3507 struct bnx2x *bp = netdev_priv(dev);
3508
3509 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003510 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003511 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003512 features &= ~NETIF_F_GRO;
3513 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003514
3515 return features;
3516}
3517
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003518int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003519{
3520 struct bnx2x *bp = netdev_priv(dev);
3521 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003522 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003523
3524 if (features & NETIF_F_LRO)
3525 flags |= TPA_ENABLE_FLAG;
3526 else
3527 flags &= ~TPA_ENABLE_FLAG;
3528
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003529 if (features & NETIF_F_GRO)
3530 flags |= GRO_ENABLE_FLAG;
3531 else
3532 flags &= ~GRO_ENABLE_FLAG;
3533
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003534 if (features & NETIF_F_LOOPBACK) {
3535 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3536 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3537 bnx2x_reload = true;
3538 }
3539 } else {
3540 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3541 bp->link_params.loopback_mode = LOOPBACK_NONE;
3542 bnx2x_reload = true;
3543 }
3544 }
3545
Michał Mirosław66371c42011-04-12 09:38:23 +00003546 if (flags ^ bp->flags) {
3547 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003548 bnx2x_reload = true;
3549 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003550
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003551 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003552 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3553 return bnx2x_reload_if_running(dev);
3554 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003555 }
3556
Michał Mirosław66371c42011-04-12 09:38:23 +00003557 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003558}
3559
3560void bnx2x_tx_timeout(struct net_device *dev)
3561{
3562 struct bnx2x *bp = netdev_priv(dev);
3563
3564#ifdef BNX2X_STOP_ON_ERROR
3565 if (!bp->panic)
3566 bnx2x_panic();
3567#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003568
3569 smp_mb__before_clear_bit();
3570 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3571 smp_mb__after_clear_bit();
3572
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003573 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003574 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003575}
3576
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003577int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3578{
3579 struct net_device *dev = pci_get_drvdata(pdev);
3580 struct bnx2x *bp;
3581
3582 if (!dev) {
3583 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3584 return -ENODEV;
3585 }
3586 bp = netdev_priv(dev);
3587
3588 rtnl_lock();
3589
3590 pci_save_state(pdev);
3591
3592 if (!netif_running(dev)) {
3593 rtnl_unlock();
3594 return 0;
3595 }
3596
3597 netif_device_detach(dev);
3598
3599 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3600
3601 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3602
3603 rtnl_unlock();
3604
3605 return 0;
3606}
3607
3608int bnx2x_resume(struct pci_dev *pdev)
3609{
3610 struct net_device *dev = pci_get_drvdata(pdev);
3611 struct bnx2x *bp;
3612 int rc;
3613
3614 if (!dev) {
3615 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3616 return -ENODEV;
3617 }
3618 bp = netdev_priv(dev);
3619
3620 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00003621 netdev_err(dev, "Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003622 return -EAGAIN;
3623 }
3624
3625 rtnl_lock();
3626
3627 pci_restore_state(pdev);
3628
3629 if (!netif_running(dev)) {
3630 rtnl_unlock();
3631 return 0;
3632 }
3633
3634 bnx2x_set_power_state(bp, PCI_D0);
3635 netif_device_attach(dev);
3636
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003637 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3638
3639 rtnl_unlock();
3640
3641 return rc;
3642}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003643
3644
3645void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3646 u32 cid)
3647{
3648 /* ustorm cxt validation */
3649 cxt->ustorm_ag_context.cdu_usage =
3650 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3651 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3652 /* xcontext validation */
3653 cxt->xstorm_ag_context.cdu_reserved =
3654 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3655 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3656}
3657
3658static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3659 u8 fw_sb_id, u8 sb_index,
3660 u8 ticks)
3661{
3662
3663 u32 addr = BAR_CSTRORM_INTMEM +
3664 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3665 REG_WR8(bp, addr, ticks);
3666 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3667 port, fw_sb_id, sb_index, ticks);
3668}
3669
3670static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3671 u16 fw_sb_id, u8 sb_index,
3672 u8 disable)
3673{
3674 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3675 u32 addr = BAR_CSTRORM_INTMEM +
3676 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3677 u16 flags = REG_RD16(bp, addr);
3678 /* clear and set */
3679 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3680 flags |= enable_flag;
3681 REG_WR16(bp, addr, flags);
3682 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3683 port, fw_sb_id, sb_index, disable);
3684}
3685
3686void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3687 u8 sb_index, u8 disable, u16 usec)
3688{
3689 int port = BP_PORT(bp);
3690 u8 ticks = usec / BNX2X_BTR;
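	/* e.g. usec = 48 would give ticks = 12, assuming BNX2X_BTR is the 4us
	 * basic timer resolution defined elsewhere in the driver
	 */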
3691
3692 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3693
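	/* a zero interval also disables this SB index, just like an explicit
	 * disable request
	 */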
3694 disable = disable ? 1 : (usec ? 0 : 1);
3695 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3696}