/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
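
/* Worked example (illustrative only, not driver logic): with
 * BNX2X_NUM_ETH_QUEUES() = 6, max_cos = 3 and the FCoE fastpath moving
 * from index 6 to index 4 (delta = 2):
 *
 *	old_max_eth_txqs = 6 * 3 = 18
 *	new_max_eth_txqs = (6 - 6 + 4) * 3 = 12
 *
 * so the FCoE txdata slot moves back by max_cos * delta = 6 entries,
 * exactly the "decremented by max_cos x delta" rule quoted above.
 */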

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}
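
/* Worked example (illustrative, with a hypothetical bp->fw_ver of "7.8.2"
 * and bp->common.bc_ver = 0x070a28): the shifts above unpack the bootcode
 * version bytes 0x07, 0x0a, 0x28 into 7.10.40, so the buffer ends up as
 * "7.8.2 bc 7.10.40", plus " phy <ver>" when the external PHY reports a
 * firmware version.
 */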

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
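
/* Worked example (illustrative): if 8 ETH queues were configured but
 * delta = 2 of them could not be allocated, then for cos = 1, queue i's
 * txdata moves to index 1 * (8 - 2) + i. The destination index is always
 * at or below the source, which is why iterating i upwards never
 * overwrites an entry that has not been copied yet.
 */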

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
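
/* Illustrative layout (sketch, not from the driver): for a TSO packet that
 * used a split header, the BD chain consumed by one skb looks like
 *
 *	start BD -> parse BD -> split-header BD -> frag BD ... frag BD
 *
 * Only the start BD and the frag BDs carry DMA mappings, which is why the
 * walk above decrements nbd past the parse and split-header BDs without
 * unmapping them.
 */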

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
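
/* Illustrative sketch (not driver code) of the race the smp_mb() above
 * closes, assuming one completion path and one transmit path per queue:
 *
 *	completion (here)              transmit (bnx2x_start_xmit)
 *	-----------------              ---------------------------
 *	update tx_bd_cons              see full ring, stop queue
 *	smp_mb()                       barrier
 *	queue stopped? -> wake         re-check tx_avail()
 *
 * Without the full barrier, the completion side could read a stale
 * "not stopped" state while the transmit side reads a stale consumer,
 * leaving the queue stopped forever with completed credits available.
 */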

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
				    RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
				  le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
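
/* Worked example (illustrative): each fp->sge_mask[] element is a 64-bit
 * vector (BIT_VEC64_ELEM_SZ = 64) where a set bit marks an SGE that is
 * free for reuse. If first_elem = 3, elements 3 and 4 have gone all-zero
 * (fully consumed) and element 5 still has bits set, the producer loop
 * above re-arms elements 3 and 4 and advances rx_sge_prod by
 * delta = 2 * 64 entries, stopping at element 5.
 */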

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximates the MSS for this aggregation using its first packet,
 * and computes the number of aggregated segments and the gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
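
/* Worked example (illustrative): an IPv4 aggregation with TCP timestamps
 * and len_on_bd = 1514 gives
 *
 *	hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (tstamp) = 66
 *	gso_size = 1514 - 66 = 1448
 *
 * which matches the usual MSS on a 1500-byte-MTU link with timestamps.
 */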

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
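
/* Worked example (illustrative, assuming 4 KiB pages so SGE_PAGES = 4096):
 * with gro_size = 1448, full_page = 4096 / 1448 * 1448 = 2896, so each SGE
 * in GRO mode is split into two 1448-byte frags; the trailing 1200 bytes of
 * the page stay unused so that no frag straddles an MSS boundary.
 * get_page() is taken for every frag after the first because each skb frag
 * holds its own reference on the same page.
 */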

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
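
/* Design note (sketch, not normative): rx_frag_size is non-zero when the
 * rx buffer fits in a page fragment, in which case allocation and free go
 * through the page-frag allocator (netdev_alloc_frag()/put_page()); larger
 * buffers, e.g. for jumbo MTUs, fall back to plain kmalloc()/kfree().
 * The two helpers above must therefore always be used as a matched pair.
 */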

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif
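
/* Note (illustrative): tcp_gro_complete() expects th->check to hold the
 * complement of the TCP pseudo-header checksum, as if checksum offload
 * were about to run on transmit. The helpers above rebuild exactly that,
 * e.g. ~tcp_v4_check(tcp_len, saddr, daddr, 0) for IPv4, before handing
 * the merged super-packet back to the stack.
 */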

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
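
/* Decision table for the logic above (illustrative restatement):
 *
 *	L4 validated?	bad-csum flag?	result
 *	no		-		leave CHECKSUM_NONE, stack verifies
 *	yes		set		count hw_csum_err, stack verifies
 *	yes		clear		CHECKSUM_UNNECESSARY, skip sw csum
 */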

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
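
/* Worked example (illustrative): on a 10000 Mbps link with maxCfg = 25,
 * SI mode reports 10000 * 25 / 100 = 2500 Mbps, and SD mode caps the speed
 * at vn_max_rate = 25 * 100 = 2500 Mbps. The two modes agree here but
 * diverge when the physical link runs below its maximum: at 1000 Mbps,
 * SI scales down to 250 Mbps while SD still reports the full 1000 Mbps.
 */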

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent
 * link reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
1228
Eric Dumazet1191cb82012-04-27 21:39:21 +00001229static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1230{
1231 int i;
1232
1233 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1234 struct eth_rx_sge *sge;
1235
1236 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1237 sge->addr_hi =
1238 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1239 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1240
1241 sge->addr_lo =
1242 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1244 }
1245}
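/* Illustration (not from the original source): the reserved entry near the
 * end of each SGE page receives the DMA address of the following page, and
 * the last page wraps back to page 0 via (i % NUM_RX_SGE_PAGES), so the
 * hardware traverses the SGE pages as a circular list.
 */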
1246
1247static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1248 struct bnx2x_fastpath *fp, int last)
1249{
1250 int i;
1251
1252 for (i = 0; i < last; i++) {
1253 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1254 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1255 u8 *data = first_buf->data;
1256
1257 if (data == NULL) {
1258 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1259 continue;
1260 }
1261 if (tpa_info->tpa_state == BNX2X_TPA_START)
1262 dma_unmap_single(&bp->pdev->dev,
1263 dma_unmap_addr(first_buf, mapping),
1264 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001265 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001266 first_buf->data = NULL;
1267 }
1268}
1269
Merav Sicron55c11942012-11-07 00:45:48 +00001270void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1271{
1272 int j;
1273
1274 for_each_rx_queue_cnic(bp, j) {
1275 struct bnx2x_fastpath *fp = &bp->fp[j];
1276
1277 fp->rx_bd_cons = 0;
1278
1279 /* Activate BD ring */
1280 /* Warning!
1281 * this will generate an interrupt (to the TSTORM);
1282 * it must only be done after the chip is initialized
1283 */
1284 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1285 fp->rx_sge_prod);
1286 }
1287}
1288
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001289void bnx2x_init_rx_rings(struct bnx2x *bp)
1290{
1291 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001292 u16 ring_prod;
1293 int i, j;
1294
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001295 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001296 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001297 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001298
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001299 DP(NETIF_MSG_IFUP,
1300 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1301
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001302 if (!fp->disable_tpa) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001303 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001304 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001305 struct bnx2x_agg_info *tpa_info =
1306 &fp->tpa_info[i];
1307 struct sw_rx_bd *first_buf =
1308 &tpa_info->first_buf;
1309
Eric Dumazetd46d1322012-12-10 12:16:06 +00001310 first_buf->data = bnx2x_frag_alloc(fp);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001311 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001312 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1313 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001314 bnx2x_free_tpa_pool(bp, fp, i);
1315 fp->disable_tpa = 1;
1316 break;
1317 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001318 dma_unmap_addr_set(first_buf, mapping, 0);
1319 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001320 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001321
1322 /* "next page" elements initialization */
1323 bnx2x_set_next_page_sgl(fp);
1324
1325 /* set SGEs bit mask */
1326 bnx2x_init_sge_ring_bit_mask(fp);
1327
1328 /* Allocate SGEs and initialize the ring elements */
1329 for (i = 0, ring_prod = 0;
1330 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1331
1332 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001333 BNX2X_ERR("was only able to allocate %d rx sges\n",
1334 i);
1335 BNX2X_ERR("disabling TPA for queue[%d]\n",
1336 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001337 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001338 bnx2x_free_rx_sge_range(bp, fp,
1339 ring_prod);
1340 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001341 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001342 fp->disable_tpa = 1;
1343 ring_prod = 0;
1344 break;
1345 }
1346 ring_prod = NEXT_SGE_IDX(ring_prod);
1347 }
1348
1349 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001350 }
1351 }
1352
Merav Sicron55c11942012-11-07 00:45:48 +00001353 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001354 struct bnx2x_fastpath *fp = &bp->fp[j];
1355
1356 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001357
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001358 /* Activate BD ring */
1359 /* Warning!
1360 * this will generate an interrupt (to the TSTORM);
1361 * it must only be done after the chip is initialized
1362 */
1363 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1364 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001365
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001366 if (j != 0)
1367 continue;
1368
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001369 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1372 U64_LO(fp->rx_comp_mapping));
1373 REG_WR(bp, BAR_USTRORM_INTMEM +
1374 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1375 U64_HI(fp->rx_comp_mapping));
1376 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001377 }
1378}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001379
Merav Sicron55c11942012-11-07 00:45:48 +00001380static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1381{
1382 u8 cos;
1383 struct bnx2x *bp = fp->bp;
1384
1385 for_each_cos_in_tx_queue(fp, cos) {
1386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1387 unsigned pkts_compl = 0, bytes_compl = 0;
1388
1389 u16 sw_prod = txdata->tx_pkt_prod;
1390 u16 sw_cons = txdata->tx_pkt_cons;
1391
1392 while (sw_cons != sw_prod) {
1393 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1394 &pkts_compl, &bytes_compl);
1395 sw_cons++;
1396 }
1397
1398 netdev_tx_reset_queue(
1399 netdev_get_tx_queue(bp->dev,
1400 txdata->txq_index));
1401 }
1402}
1403
1404static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1405{
1406 int i;
1407
1408 for_each_tx_queue_cnic(bp, i) {
1409 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1410 }
1411}
1412
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001413static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1414{
1415 int i;
1416
Merav Sicron55c11942012-11-07 00:45:48 +00001417 for_each_eth_queue(bp, i) {
1418 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001419 }
1420}
1421
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001422static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1423{
1424 struct bnx2x *bp = fp->bp;
1425 int i;
1426
1427 /* ring wasn't allocated */
1428 if (fp->rx_buf_ring == NULL)
1429 return;
1430
1431 for (i = 0; i < NUM_RX_BD; i++) {
1432 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001433 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001434
Eric Dumazete52fcb22011-11-14 06:05:34 +00001435 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001436 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001437 dma_unmap_single(&bp->pdev->dev,
1438 dma_unmap_addr(rx_buf, mapping),
1439 fp->rx_buf_size, DMA_FROM_DEVICE);
1440
Eric Dumazete52fcb22011-11-14 06:05:34 +00001441 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001442 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001443 }
1444}
1445
Merav Sicron55c11942012-11-07 00:45:48 +00001446static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1447{
1448 int j;
1449
1450 for_each_rx_queue_cnic(bp, j) {
1451 bnx2x_free_rx_bds(&bp->fp[j]);
1452 }
1453}
1454
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001455static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1456{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001457 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001458
Merav Sicron55c11942012-11-07 00:45:48 +00001459 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001460 struct bnx2x_fastpath *fp = &bp->fp[j];
1461
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001462 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001463
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001464 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001465 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001466 }
1467}
1468
Merav Sicron55c11942012-11-07 00:45:48 +00001469void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1470{
1471 bnx2x_free_tx_skbs_cnic(bp);
1472 bnx2x_free_rx_skbs_cnic(bp);
1473}
1474
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001475void bnx2x_free_skbs(struct bnx2x *bp)
1476{
1477 bnx2x_free_tx_skbs(bp);
1478 bnx2x_free_rx_skbs(bp);
1479}
1480
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001481void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1482{
1483 /* load old values */
1484 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1485
1486 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1487 /* leave all but MAX value */
1488 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1489
1490 /* set new MAX value */
1491 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1492 & FUNC_MF_CFG_MAX_BW_MASK;
1493
1494 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1495 }
1496}
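/* Hypothetical usage sketch: assuming FUNC_MF_CFG_MAX_BW_SHIFT is 16,
 * bnx2x_update_max_mf_config(bp, 50) would clear the old MAX_BW field,
 * OR in (50 << 16), and report the new limit to the management FW via
 * the DRV_MSG_CODE_SET_MF_BW command.
 */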
1497
Dmitry Kravkovca924292011-06-14 01:33:08 +00001498/**
1499 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1500 *
1501 * @bp: driver handle
1502 * @nvecs: number of vectors to be released
1503 */
1504static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001505{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001506 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001507
Dmitry Kravkovca924292011-06-14 01:33:08 +00001508 if (nvecs == offset)
1509 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001510
1511 /* VFs don't have a default SB */
1512 if (IS_PF(bp)) {
1513 free_irq(bp->msix_table[offset].vector, bp->dev);
1514 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1515 bp->msix_table[offset].vector);
1516 offset++;
1517 }
Merav Sicron55c11942012-11-07 00:45:48 +00001518
1519 if (CNIC_SUPPORT(bp)) {
1520 if (nvecs == offset)
1521 return;
1522 offset++;
1523 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001524
Dmitry Kravkovca924292011-06-14 01:33:08 +00001525 for_each_eth_queue(bp, i) {
1526 if (nvecs == offset)
1527 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001528 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1529 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001530
1531 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001532 }
1533}
1534
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001535void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001536{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001537 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001538 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1539 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1540
1541 /* vfs don't have a default status block */
1542 if (IS_PF(bp))
1543 nvecs++;
1544
1545 bnx2x_free_msix_irqs(bp, nvecs);
1546 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001547 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001548 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001549}
1550
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001551int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001552{
Ariel Elior1ab44342013-01-01 05:22:23 +00001553 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001554
Ariel Elior1ab44342013-01-01 05:22:23 +00001555 /* VFs don't have a default status block */
1556 if (IS_PF(bp)) {
1557 bp->msix_table[msix_vec].entry = msix_vec;
1558 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1559 bp->msix_table[0].entry);
1560 msix_vec++;
1561 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001562
Merav Sicron55c11942012-11-07 00:45:48 +00001563 /* CNIC requires an MSI-X vector for itself */
1564 if (CNIC_SUPPORT(bp)) {
1565 bp->msix_table[msix_vec].entry = msix_vec;
1566 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1567 msix_vec, bp->msix_table[msix_vec].entry);
1568 msix_vec++;
1569 }
1570
Ariel Elior6383c0b2011-07-14 08:31:57 +00001571 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001572 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001573 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001574 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1575 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001576 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001577 }
1578
Ariel Elior1ab44342013-01-01 05:22:23 +00001579 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1580 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001581
Ariel Elior1ab44342013-01-01 05:22:23 +00001582 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001583
1584 /*
1585 * reconfigure number of tx/rx queues according to available
1586 * MSI-X vectors
1587 */
Merav Sicron55c11942012-11-07 00:45:48 +00001588 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001589 /* how many fewer vectors will we have? */
Ariel Elior1ab44342013-01-01 05:22:23 +00001590 int diff = msix_vec - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001591
Merav Sicron51c1a582012-03-18 10:33:38 +00001592 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001593
1594 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1595
1596 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001597 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1598 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001599 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001600 /*
1601 * decrease number of queues by number of unallocated entries
1602 */
Merav Sicron55c11942012-11-07 00:45:48 +00001603 bp->num_ethernet_queues -= diff;
1604 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001605
Merav Sicron51c1a582012-03-18 10:33:38 +00001606 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001607 bp->num_queues);
1608 } else if (rc > 0) {
1609 /* Get by with single vector */
1610 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1611 if (rc) {
1612 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1613 rc);
1614 goto no_msix;
1615 }
1616
1617 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1618 bp->flags |= USING_SINGLE_MSIX_FLAG;
1619
Merav Sicron55c11942012-11-07 00:45:48 +00001620 BNX2X_DEV_INFO("set number of queues to 1\n");
1621 bp->num_ethernet_queues = 1;
1622 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001623 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001624 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001625 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001626 }
1627
1628 bp->flags |= USING_MSIX_FLAG;
1629
1630 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001631
1632no_msix:
1633 /* fall to INTx if not enough memory */
1634 if (rc == -ENOMEM)
1635 bp->flags |= DISABLE_MSI_FLAG;
1636
1637 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001638}
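/* Resulting msix_table layout (illustration): on a PF with CNIC support,
 * entry 0 is the slowpath/default status block, entry 1 is CNIC, and
 * entries 2..N+1 cover the N ETH fastpath queues; on a VF the slowpath
 * entry is simply absent and everything shifts down by one.
 */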
1639
1640static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1641{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001642 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001643
Ariel Eliorad5afc82013-01-01 05:22:26 +00001644 /* no default status block for vf */
1645 if (IS_PF(bp)) {
1646 rc = request_irq(bp->msix_table[offset++].vector,
1647 bnx2x_msix_sp_int, 0,
1648 bp->dev->name, bp->dev);
1649 if (rc) {
1650 BNX2X_ERR("request sp irq failed\n");
1651 return -EBUSY;
1652 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001653 }
1654
Merav Sicron55c11942012-11-07 00:45:48 +00001655 if (CNIC_SUPPORT(bp))
1656 offset++;
1657
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001658 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001659 struct bnx2x_fastpath *fp = &bp->fp[i];
1660 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1661 bp->dev->name, i);
1662
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001663 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001664 bnx2x_msix_fp_int, 0, fp->name, fp);
1665 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001666 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1667 bp->msix_table[offset].vector, rc);
1668 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001669 return -EBUSY;
1670 }
1671
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001672 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001673 }
1674
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001675 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001676 if (IS_PF(bp)) {
1677 offset = 1 + CNIC_SUPPORT(bp);
1678 netdev_info(bp->dev,
1679 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1680 bp->msix_table[0].vector,
1681 0, bp->msix_table[offset].vector,
1682 i - 1, bp->msix_table[offset + i - 1].vector);
1683 } else {
1684 offset = CNIC_SUPPORT(bp);
1685 netdev_info(bp->dev,
1686 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1687 0, bp->msix_table[offset].vector,
1688 i - 1, bp->msix_table[offset + i - 1].vector);
1689 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001690 return 0;
1691}
1692
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001693int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001694{
1695 int rc;
1696
1697 rc = pci_enable_msi(bp->pdev);
1698 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001699 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001700 return -1;
1701 }
1702 bp->flags |= USING_MSI_FLAG;
1703
1704 return 0;
1705}
1706
1707static int bnx2x_req_irq(struct bnx2x *bp)
1708{
1709 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001710 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001711
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001712 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001713 flags = 0;
1714 else
1715 flags = IRQF_SHARED;
1716
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001717 if (bp->flags & USING_MSIX_FLAG)
1718 irq = bp->msix_table[0].vector;
1719 else
1720 irq = bp->pdev->irq;
1721
1722 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001723}
1724
Yuval Mintzc957d092013-06-25 08:50:11 +03001725static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001726{
1727 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001728 if (bp->flags & USING_MSIX_FLAG &&
1729 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001730 rc = bnx2x_req_msix_irqs(bp);
1731 if (rc)
1732 return rc;
1733 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001734 rc = bnx2x_req_irq(bp);
1735 if (rc) {
1736 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1737 return rc;
1738 }
1739 if (bp->flags & USING_MSI_FLAG) {
1740 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001741 netdev_info(bp->dev, "using MSI IRQ %d\n",
1742 bp->dev->irq);
1743 }
1744 if (bp->flags & USING_MSIX_FLAG) {
1745 bp->dev->irq = bp->msix_table[0].vector;
1746 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1747 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001748 }
1749 }
1750
1751 return 0;
1752}
1753
Merav Sicron55c11942012-11-07 00:45:48 +00001754static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1755{
1756 int i;
1757
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001758 for_each_rx_queue_cnic(bp, i) {
1759 bnx2x_fp_init_lock(&bp->fp[i]);
Merav Sicron55c11942012-11-07 00:45:48 +00001760 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001761 }
Merav Sicron55c11942012-11-07 00:45:48 +00001762}
1763
Eric Dumazet1191cb82012-04-27 21:39:21 +00001764static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001765{
1766 int i;
1767
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001768 for_each_eth_queue(bp, i) {
1769 bnx2x_fp_init_lock(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001770 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001771 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001772}
1773
Merav Sicron55c11942012-11-07 00:45:48 +00001774static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1775{
1776 int i;
1777
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001778 local_bh_disable();
1779 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001780 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001781 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1782 mdelay(1);
1783 }
1784 local_bh_enable();
Merav Sicron55c11942012-11-07 00:45:48 +00001785}
1786
Eric Dumazet1191cb82012-04-27 21:39:21 +00001787static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001788{
1789 int i;
1790
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001791 local_bh_disable();
1792 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001793 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001794 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1795 mdelay(1);
1796 }
1797 local_bh_enable();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001798}
1799
1800void bnx2x_netif_start(struct bnx2x *bp)
1801{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001802 if (netif_running(bp->dev)) {
1803 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001804 if (CNIC_LOADED(bp))
1805 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001806 bnx2x_int_enable(bp);
1807 if (bp->state == BNX2X_STATE_OPEN)
1808 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001809 }
1810}
1811
1812void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1813{
1814 bnx2x_int_disable_sync(bp, disable_hw);
1815 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001816 if (CNIC_LOADED(bp))
1817 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001818}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001819
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001820u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1821{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001822 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001823
Merav Sicron55c11942012-11-07 00:45:48 +00001824 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001825 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1826 u16 ether_type = ntohs(hdr->h_proto);
1827
1828 /* Skip VLAN tag if present */
1829 if (ether_type == ETH_P_8021Q) {
1830 struct vlan_ethhdr *vhdr =
1831 (struct vlan_ethhdr *)skb->data;
1832
1833 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1834 }
1835
1836 /* If ethertype is FCoE or FIP - use FCoE ring */
1837 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001838 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001839 }
Merav Sicron55c11942012-11-07 00:45:48 +00001840
David S. Miller823dcd22011-08-20 10:39:12 -07001841 /* select a non-FCoE queue */
Eric Dumazetada7c192013-05-31 14:32:55 +00001842 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001843}
1844
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001845void bnx2x_set_num_queues(struct bnx2x *bp)
1846{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001847 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001848 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001849
Barak Witkowskia3348722012-04-23 03:04:46 +00001850 /* override in STORAGE SD modes */
1851 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001852 bp->num_ethernet_queues = 1;
1853
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001854 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001855 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1856 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001857
1858 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001859}
1860
David S. Miller823dcd22011-08-20 10:39:12 -07001861/**
1862 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1863 *
1864 * @bp: Driver handle
1865 *
1866 * We currently support at most 16 Tx queues for each CoS, thus we will
1867 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1868 * bp->max_cos.
1869 *
1870 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1871 * index after all ETH L2 indices.
1872 *
1873 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1874 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001875 * 16..31,...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001876 *
1877 * The proper configuration of skb->queue_mapping is handled by
1878 * bnx2x_select_queue() and __skb_tx_hash().
1879 *
1880 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1881 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1882 */
Merav Sicron55c11942012-11-07 00:45:48 +00001883static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001884{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001885 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001886
Merav Sicron65565882012-06-19 07:48:26 +00001887 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001888 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001889
1890	/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001891 if (include_cnic && !NO_FCOE(bp)) {
1892 rx++;
1893 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001894 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001895
1896 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1897 if (rc) {
1898 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1899 return rc;
1900 }
1901 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1902 if (rc) {
1903 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1904 return rc;
1905 }
1906
Merav Sicron51c1a582012-03-18 10:33:38 +00001907 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001908 tx, rx);
1909
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001910 return rc;
1911}
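/* Worked example for the layout described above (hypothetical numbers):
 * with 8 ETH queues and bp->max_cos == 3, the conceptual Tx index space
 * is CoS 0 -> 0..7 (8..15 are holes), CoS 1 -> 16..23 (24..31 are holes),
 * CoS 2 -> 32..39, and an FCoE L2 queue, if present, takes the next
 * index after all ETH L2 indices.
 */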
1912
Eric Dumazet1191cb82012-04-27 21:39:21 +00001913static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001914{
1915 int i;
1916
1917 for_each_queue(bp, i) {
1918 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001919 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001920
1921 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1922 if (IS_FCOE_IDX(i))
1923 /*
1924 * Although no IP frames are expected to arrive on
1925 * this ring, we still want to add an
1926 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1927 * overrun attack.
1928 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001929 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001930 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001931 mtu = bp->dev->mtu;
1932 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1933 IP_HEADER_ALIGNMENT_PADDING +
1934 ETH_OVREHEAD +
1935 mtu +
1936 BNX2X_FW_RX_ALIGN_END;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001937 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00001938 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1939 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1940 else
1941 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001942 }
1943}
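/* Rough sketch (assuming 4 KiB pages): with a standard 1500-byte MTU,
 * rx_buf_size + NET_SKB_PAD fits within one page, so rx_frag_size is set
 * and buffers can come from the cheap page-fragment allocator; with a
 * jumbo MTU (e.g. 9000) the sum exceeds PAGE_SIZE, rx_frag_size stays 0
 * and allocation falls back to kmalloc().
 */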
1944
Eric Dumazet1191cb82012-04-27 21:39:21 +00001945static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001946{
1947 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001948 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1949
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001950 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001951 * enabled
1952 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001953 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1954 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001955 bp->fp->cl_id +
1956 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001957
1958 /*
1959 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1960 * per-port, so if explicit configuration is needed , do it only
1961 * for a PMF.
1962 *
1963 * For 57712 and newer on the other hand it's a per-function
1964 * configuration.
1965 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001966 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001967}
1968
Dmitry Kravkov96305232012-04-03 18:41:30 +00001969int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001970 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001971{
Yuval Mintz3b603062012-03-18 10:33:39 +00001972 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001973
1974 /* Although RSS is meaningless when there is a single HW queue we
1975 * still need it enabled in order to have HW Rx hash generated.
1976 *
1977 * if (!is_eth_multi(bp))
1978 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1979 */
1980
Dmitry Kravkov96305232012-04-03 18:41:30 +00001981 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001982
1983 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1984
Dmitry Kravkov96305232012-04-03 18:41:30 +00001985 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001986
Dmitry Kravkov96305232012-04-03 18:41:30 +00001987 /* RSS configuration */
1988 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1989 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1990 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1991 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001992 if (rss_obj->udp_rss_v4)
1993 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1994 if (rss_obj->udp_rss_v6)
1995 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001996
Dmitry Kravkov96305232012-04-03 18:41:30 +00001997 /* Hash bits */
1998 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001999
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002000 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002001
Dmitry Kravkov96305232012-04-03 18:41:30 +00002002 if (config_hash) {
2003 /* RSS keys */
Akinobu Mita8376d0b2012-12-17 16:04:28 -08002004 prandom_bytes(params.rss_key, sizeof(params.rss_key));
Dmitry Kravkov96305232012-04-03 18:41:30 +00002005 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002006 }
2007
2008 return bnx2x_config_rss(bp, &params);
2009}
2010
Eric Dumazet1191cb82012-04-27 21:39:21 +00002011static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002012{
Yuval Mintz3b603062012-03-18 10:33:39 +00002013 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002014
2015 /* Prepare parameters for function state transitions */
2016 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2017
2018 func_params.f_obj = &bp->func_obj;
2019 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2020
2021 func_params.params.hw_init.load_phase = load_code;
2022
2023 return bnx2x_func_state_change(bp, &func_params);
2024}
2025
2026/*
2027 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002028 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002029 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002030void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002031{
2032 int rc;
2033 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002034 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002035 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002036
2037 /***************** Cleanup MACs' object first *************************/
2038
2039 /* Wait for completion of the requested commands */
2040 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2041 /* Perform a dry cleanup */
2042 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2043
2044 /* Clean ETH primary MAC */
2045 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002046 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002047 &ramrod_flags);
2048 if (rc != 0)
2049 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2050
2051 /* Cleanup UC list */
2052 vlan_mac_flags = 0;
2053 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2054 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2055 &ramrod_flags);
2056 if (rc != 0)
2057 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2058
2059 /***************** Now clean mcast object *****************************/
2060 rparam.mcast_obj = &bp->mcast_obj;
2061 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2062
2063 /* Add a DEL command... */
2064 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2065 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002066 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2067 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002068
2069 /* ...and wait until all pending commands are cleared */
2070 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2071 while (rc != 0) {
2072 if (rc < 0) {
2073 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2074 rc);
2075 return;
2076 }
2077
2078 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2079 }
2080}
2081
2082#ifndef BNX2X_STOP_ON_ERROR
2083#define LOAD_ERROR_EXIT(bp, label) \
2084 do { \
2085 (bp)->state = BNX2X_STATE_ERROR; \
2086 goto label; \
2087 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002088
2089#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2090 do { \
2091 bp->cnic_loaded = false; \
2092 goto label; \
2093 } while (0)
2094#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002095#define LOAD_ERROR_EXIT(bp, label) \
2096 do { \
2097 (bp)->state = BNX2X_STATE_ERROR; \
2098 (bp)->panic = 1; \
2099 return -EBUSY; \
2100 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002101#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2102 do { \
2103 bp->cnic_loaded = false; \
2104 (bp)->panic = 1; \
2105 return -EBUSY; \
2106 } while (0)
2107#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002108
Ariel Eliorad5afc82013-01-01 05:22:26 +00002109static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002110{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002111 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2112 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2114}
Yuval Mintz452427b2012-03-26 20:47:07 +00002115
Ariel Eliorad5afc82013-01-01 05:22:26 +00002116static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2117{
Ariel Elior8db573b2013-01-01 05:22:37 +00002118 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002119 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002120
Ariel Eliorad5afc82013-01-01 05:22:26 +00002121 /* number of queues for statistics is number of eth queues + FCoE */
2122 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002123
Ariel Eliorad5afc82013-01-01 05:22:26 +00002124 /* Total number of FW statistics requests =
2125 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2126 * and fcoe l2 queue) stats + num of queues (which includes another 1
2127 * for fcoe l2 queue if applicable)
2128 */
2129 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2130
Ariel Elior8db573b2013-01-01 05:22:37 +00002131 /* vf stats appear in the request list, but their data is allocated by
2132 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2133 * it is used to determine where to place the vf stats queries in the
2134 * request struct
2135 */
2136 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002137 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002138
Ariel Eliorad5afc82013-01-01 05:22:26 +00002139 /* Request is built from stats_query_header and an array of
2140 * stats_query_cmd_group each of which contains
2141 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2142 * configured in the stats_query_header.
2143 */
2144 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002145 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2146 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002147 1 : 0));
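	/* Worked example (hypothetical numbers): if fw_stats_num + vf_headroom
	 * were 18 and STATS_QUERY_CMD_COUNT were 16, this ceiling division
	 * would yield 18 / 16 + 1 == 2 command groups.
	 */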
2148
Ariel Elior8db573b2013-01-01 05:22:37 +00002149 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2150 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002151 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2152 num_groups * sizeof(struct stats_query_cmd_group);
2153
2154 /* Data for statistics requests + stats_counter
2155 * stats_counter holds per-STORM counters that are incremented
2156 * when STORM has finished with the current request.
2157 * memory for FCoE offloaded statistics are counted anyway,
2158 * even if they will not be sent.
2159 * VF stats are not accounted for here as the data of VF stats is stored
2160 * in memory allocated by the VF, not here.
2161 */
2162 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2163 sizeof(struct per_pf_stats) +
2164 sizeof(struct fcoe_statistics_params) +
2165 sizeof(struct per_queue_stats) * num_queue_stats +
2166 sizeof(struct stats_counter);
2167
2168 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2169 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2170
2171 /* Set shortcuts */
2172 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2173 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2174 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2175 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2176 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2177 bp->fw_stats_req_sz;
2178
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002179 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002180 U64_HI(bp->fw_stats_req_mapping),
2181 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002182 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002183 U64_HI(bp->fw_stats_data_mapping),
2184 U64_LO(bp->fw_stats_data_mapping));
2185 return 0;
2186
2187alloc_mem_err:
2188 bnx2x_free_fw_stats_mem(bp);
2189 BNX2X_ERR("Can't allocate FW stats memory\n");
2190 return -ENOMEM;
2191}
2192
2193/* send load request to mcp and analyze response */
2194static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2195{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002196 u32 param;
2197
Ariel Eliorad5afc82013-01-01 05:22:26 +00002198 /* init fw_seq */
2199 bp->fw_seq =
2200 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2201 DRV_MSG_SEQ_NUMBER_MASK);
2202 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2203
2204 /* Get current FW pulse sequence */
2205 bp->fw_drv_pulse_wr_seq =
2206 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2207 DRV_PULSE_SEQ_MASK);
2208 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2209
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002210 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2211
2212 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2213 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2214
Ariel Eliorad5afc82013-01-01 05:22:26 +00002215 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002216 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002217
2218 /* if mcp fails to respond we must abort */
2219 if (!(*load_code)) {
2220 BNX2X_ERR("MCP response failure, aborting\n");
2221 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002222 }
2223
Ariel Eliorad5afc82013-01-01 05:22:26 +00002224 /* If mcp refused (e.g. other port is in diagnostic mode) we
2225 * must abort
2226 */
2227 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2228 BNX2X_ERR("MCP refused load request, aborting\n");
2229 return -EBUSY;
2230 }
2231 return 0;
2232}
2233
2234/* check whether another PF has already loaded FW to chip. In
2235 * virtualized environments a pf from another VM may have already
2236 * initialized the device including loading FW
2237 */
2238int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2239{
2240 /* is another pf loaded on this engine? */
2241 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2242 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2243 /* build my FW version dword */
2244 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2245 (BCM_5710_FW_MINOR_VERSION << 8) +
2246 (BCM_5710_FW_REVISION_VERSION << 16) +
2247 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2248
2249 /* read loaded FW from chip */
2250 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2251
2252 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2253 loaded_fw, my_fw);
2254
2255 /* abort nic load if version mismatch */
2256 if (my_fw != loaded_fw) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002257 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002258 loaded_fw, my_fw);
2259 return -EBUSY;
2260 }
2261 }
2262 return 0;
2263}
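/* Worked example (illustrative): for FW version 7.8.2.0 the dword built
 * above is 7 + (8 << 8) + (2 << 16) + (0 << 24) == 0x00020807; the load
 * aborts unless the value read back from XSEM_REG_PRAM matches it.
 */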
2264
2265/* returns the "mcp load_code" according to global load_count array */
2266static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2267{
2268 int path = BP_PATH(bp);
2269
2270 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2271 path, load_count[path][0], load_count[path][1],
2272 load_count[path][2]);
2273 load_count[path][0]++;
2274 load_count[path][1 + port]++;
2275 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2276 path, load_count[path][0], load_count[path][1],
2277 load_count[path][2]);
2278 if (load_count[path][0] == 1)
2279 return FW_MSG_CODE_DRV_LOAD_COMMON;
2280 else if (load_count[path][1 + port] == 1)
2281 return FW_MSG_CODE_DRV_LOAD_PORT;
2282 else
2283 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2284}
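/* Illustration (hypothetical sequence): the first PF to load on a path
 * bumps load_count[path][0] to 1 and gets LOAD_COMMON; the first PF on
 * the other port of that path gets LOAD_PORT; any later PF on an
 * already-initialized port gets LOAD_FUNCTION.
 */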
2285
2286/* mark PMF if applicable */
2287static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2288{
2289 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2290 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2291 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2292 bp->port.pmf = 1;
2293 /* We need the barrier to ensure the ordering between the
2294 * writing to bp->port.pmf here and reading it from the
2295 * bnx2x_periodic_task().
2296 */
2297 smp_mb();
2298 } else {
2299 bp->port.pmf = 0;
2300 }
2301
2302 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2303}
2304
2305static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2306{
2307 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2308 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2309 (bp->common.shmem2_base)) {
2310 if (SHMEM2_HAS(bp, dcc_support))
2311 SHMEM2_WR(bp, dcc_support,
2312 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2313 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2314 if (SHMEM2_HAS(bp, afex_driver_support))
2315 SHMEM2_WR(bp, afex_driver_support,
2316 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2317 }
2318
2319 /* Set AFEX default VLAN tag to an invalid value */
2320 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002321}
2322
Eric Dumazet1191cb82012-04-27 21:39:21 +00002323/**
2324 * bnx2x_bz_fp - zero content of the fastpath structure.
2325 *
2326 * @bp: driver handle
2327 * @index: fastpath index to be zeroed
2328 *
2329 * Makes sure the contents of the bp->fp[index].napi is kept
2330 * intact.
2331 */
2332static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2333{
2334 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002335 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002336 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002337 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002338
Eric Dumazet1191cb82012-04-27 21:39:21 +00002339 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002340 if (fp->tpa_info)
2341 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2342 sizeof(struct bnx2x_agg_info));
2343 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002344
2345 /* Restore the NAPI object as it has been already initialized */
2346 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002347 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002348 fp->bp = bp;
2349 fp->index = index;
2350 if (IS_ETH_FP(fp))
2351 fp->max_cos = bp->max_cos;
2352 else
2353 /* Special queues support only one CoS */
2354 fp->max_cos = 1;
2355
Merav Sicron65565882012-06-19 07:48:26 +00002356 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002357 if (IS_FCOE_FP(fp))
2358 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002359 if (IS_ETH_FP(fp))
2360 for_each_cos_in_tx_queue(fp, cos)
2361 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2362 BNX2X_NUM_ETH_QUEUES(bp) + index];
2363
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002364 /* Set the tpa flag for each queue. The tpa flag determines the minimal
Eric Dumazet1191cb82012-04-27 21:39:21 +00002365 * queue size, so it must be set prior to queue memory allocation
2366 */
2367 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2368 (bp->flags & GRO_ENABLE_FLAG &&
2369 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2370 if (bp->flags & TPA_ENABLE_FLAG)
2371 fp->mode = TPA_MODE_LRO;
2372 else if (bp->flags & GRO_ENABLE_FLAG)
2373 fp->mode = TPA_MODE_GRO;
2374
Eric Dumazet1191cb82012-04-27 21:39:21 +00002375 /* We don't want TPA on an FCoE L2 ring */
2376 if (IS_FCOE_FP(fp))
2377 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002378}
2379
2380int bnx2x_load_cnic(struct bnx2x *bp)
2381{
2382 int i, rc, port = BP_PORT(bp);
2383
2384 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2385
2386 mutex_init(&bp->cnic_mutex);
2387
Ariel Eliorad5afc82013-01-01 05:22:26 +00002388 if (IS_PF(bp)) {
2389 rc = bnx2x_alloc_mem_cnic(bp);
2390 if (rc) {
2391 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2392 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2393 }
Merav Sicron55c11942012-11-07 00:45:48 +00002394 }
2395
2396 rc = bnx2x_alloc_fp_mem_cnic(bp);
2397 if (rc) {
2398 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2399 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2400 }
2401
2402 /* Update the number of queues with the cnic queues */
2403 rc = bnx2x_set_real_num_queues(bp, 1);
2404 if (rc) {
2405 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2406 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2407 }
2408
2409 /* Add all CNIC NAPI objects */
2410 bnx2x_add_all_napi_cnic(bp);
2411 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2412 bnx2x_napi_enable_cnic(bp);
2413
2414 rc = bnx2x_init_hw_func_cnic(bp);
2415 if (rc)
2416 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2417
2418 bnx2x_nic_init_cnic(bp);
2419
Ariel Eliorad5afc82013-01-01 05:22:26 +00002420 if (IS_PF(bp)) {
2421 /* Enable Timer scan */
2422 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002423
Ariel Eliorad5afc82013-01-01 05:22:26 +00002424 /* setup cnic queues */
2425 for_each_cnic_queue(bp, i) {
2426 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2427 if (rc) {
2428 BNX2X_ERR("Queue setup failed\n");
2429 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2430 }
Merav Sicron55c11942012-11-07 00:45:48 +00002431 }
2432 }
2433
2434 /* Initialize Rx filter. */
2435 netif_addr_lock_bh(bp->dev);
2436 bnx2x_set_rx_mode(bp->dev);
2437 netif_addr_unlock_bh(bp->dev);
2438
2439 /* re-read iscsi info */
2440 bnx2x_get_iscsi_info(bp);
2441 bnx2x_setup_cnic_irq_info(bp);
2442 bnx2x_setup_cnic_info(bp);
2443 bp->cnic_loaded = true;
2444 if (bp->state == BNX2X_STATE_OPEN)
2445 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2446
Merav Sicron55c11942012-11-07 00:45:48 +00002447 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2448
2449 return 0;
2450
2451#ifndef BNX2X_STOP_ON_ERROR
2452load_error_cnic2:
2453 /* Disable Timer scan */
2454 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2455
2456load_error_cnic1:
2457 bnx2x_napi_disable_cnic(bp);
2458 /* Update the number of queues without the cnic queues */
2459 rc = bnx2x_set_real_num_queues(bp, 0);
2460 if (rc)
2461 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2462load_error_cnic0:
2463 BNX2X_ERR("CNIC-related load failed\n");
2464 bnx2x_free_fp_mem_cnic(bp);
2465 bnx2x_free_mem_cnic(bp);
2466 return rc;
2467#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002468}
2469
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002470/* must be called with rtnl_lock */
2471int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2472{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002473 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002474 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002475
Merav Sicron55c11942012-11-07 00:45:48 +00002476 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2477 DP(NETIF_MSG_IFUP,
2478 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2479
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002480#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002481 if (unlikely(bp->panic)) {
2482 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002483 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002484 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002485#endif
2486
2487 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2488
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002489 /* zero the structure w/o any lock, before SP handler is initialized */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002490 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2491 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2492 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002493
Ariel Eliorad5afc82013-01-01 05:22:26 +00002494 if (IS_PF(bp))
2495 /* must be called before memory allocation and HW init */
2496 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002497
Ariel Elior6383c0b2011-07-14 08:31:57 +00002498 /*
2499 * Zero fastpath structures preserving invariants like napi, which are
2500 * allocated only once, fp index, max_cos, bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002501 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002502 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002503 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002504 for_each_queue(bp, i)
2505 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002506 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2507 bp->num_cnic_queues) *
2508 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002509
Merav Sicron55c11942012-11-07 00:45:48 +00002510 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002511
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002512 /* Set the receive queues buffer size */
2513 bnx2x_set_rx_buf_size(bp);
2514
Ariel Eliorad5afc82013-01-01 05:22:26 +00002515 if (IS_PF(bp)) {
2516 rc = bnx2x_alloc_mem(bp);
2517 if (rc) {
2518 BNX2X_ERR("Unable to allocate bp memory\n");
2519 return rc;
2520 }
2521 }
2522
2523 /* Allocate memory for FW statistics */
2524 if (bnx2x_alloc_fw_stats_mem(bp))
2525 LOAD_ERROR_EXIT(bp, load_error0);
2526
2527 /* need to be done after alloc mem, since it's self adjusting to amount
2528 * of memory available for RSS queues
2529 */
2530 rc = bnx2x_alloc_fp_mem(bp);
2531 if (rc) {
2532 BNX2X_ERR("Unable to allocate memory for fps\n");
2533 LOAD_ERROR_EXIT(bp, load_error0);
2534 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002535
Ariel Elior8d9ac292013-01-01 05:22:27 +00002536 /* request pf to initialize status blocks */
2537 if (IS_VF(bp)) {
2538 rc = bnx2x_vfpf_init(bp);
2539 if (rc)
2540 LOAD_ERROR_EXIT(bp, load_error0);
2541 }
2542
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002543 /* As long as bnx2x_alloc_mem() may possibly update
2544 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002545 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002546 */
Merav Sicron55c11942012-11-07 00:45:48 +00002547 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002548 if (rc) {
2549 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002550 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002551 }
2552
Ariel Elior6383c0b2011-07-14 08:31:57 +00002553 /* Configure multi-CoS mappings in the kernel.
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002554 * This configuration may be overridden by a multi-class queueing
2555 * discipline or by a DCBX negotiation result.
Ariel Elior6383c0b2011-07-14 08:31:57 +00002556 */
2557 bnx2x_setup_tc(bp->dev, bp->max_cos);
2558
Merav Sicron26614ba2012-08-27 03:26:19 +00002559 /* Add all NAPI objects */
2560 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002561 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002562 bnx2x_napi_enable(bp);
2563
Ariel Eliorad5afc82013-01-01 05:22:26 +00002564 if (IS_PF(bp)) {
2565 /* set pf load just before approaching the MCP */
2566 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002567
Ariel Eliorad5afc82013-01-01 05:22:26 +00002568 /* if mcp exists send load request and analyze response */
2569 if (!BP_NOMCP(bp)) {
2570 /* attempt to load pf */
2571 rc = bnx2x_nic_load_request(bp, &load_code);
2572 if (rc)
2573 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002574
Ariel Eliorad5afc82013-01-01 05:22:26 +00002575 /* what did mcp say? */
2576 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2577 if (rc) {
2578 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002579 LOAD_ERROR_EXIT(bp, load_error2);
2580 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002581 } else {
2582 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002583 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002584
Ariel Eliorad5afc82013-01-01 05:22:26 +00002585 /* mark pmf if applicable */
2586 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002587
Ariel Eliorad5afc82013-01-01 05:22:26 +00002588 /* Init Function state controlling object */
2589 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002590
Ariel Eliorad5afc82013-01-01 05:22:26 +00002591 /* Initialize HW */
2592 rc = bnx2x_init_hw(bp, load_code);
2593 if (rc) {
2594 BNX2X_ERR("HW init failed, aborting\n");
2595 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2596 LOAD_ERROR_EXIT(bp, load_error2);
2597 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002598 }
2599
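	/* First half of the NIC init: chip state that must be set up before
	 * IRQs are requested; the second half, bnx2x_post_irq_nic_init(),
	 * runs below once the vectors are connected.
	 */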
Yuval Mintzecf01c22013-04-22 02:53:03 +00002600 bnx2x_pre_irq_nic_init(bp);
2601
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002602 /* Connect to IRQs */
2603 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002604 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002605 BNX2X_ERR("setup irqs failed\n");
2606 if (IS_PF(bp))
2607 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002608 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002609 }
2610
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002611 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002612 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002613 /* Setup NIC internals and enable interrupts */
2614 bnx2x_post_irq_nic_init(bp, load_code);
2615
Ariel Eliorad5afc82013-01-01 05:22:26 +00002616 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002617 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002618
Ariel Eliorad5afc82013-01-01 05:22:26 +00002619 /* Set AFEX default VLAN tag to an invalid value */
2620 bp->afex_def_vlan_tag = -1;
2621 bnx2x_nic_load_afex_dcc(bp, load_code);
2622 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2623 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002624 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002625 BNX2X_ERR("Function start failed!\n");
2626 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2627
Merav Sicron55c11942012-11-07 00:45:48 +00002628 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002629 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002630
Ariel Eliorad5afc82013-01-01 05:22:26 +00002631 /* Send LOAD_DONE command to MCP */
2632 if (!BP_NOMCP(bp)) {
2633 load_code = bnx2x_fw_command(bp,
2634 DRV_MSG_CODE_LOAD_DONE, 0);
2635 if (!load_code) {
2636 BNX2X_ERR("MCP response failure, aborting\n");
2637 rc = -EBUSY;
2638 LOAD_ERROR_EXIT(bp, load_error3);
2639 }
2640 }
2641
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002642 /* initialize FW coalescing state machines in RAM */
2643 bnx2x_update_coalesce(bp);
2644
Ariel Eliorad5afc82013-01-01 05:22:26 +00002645 /* setup the leading queue */
2646 rc = bnx2x_setup_leading(bp);
2647 if (rc) {
2648 BNX2X_ERR("Setup leading failed!\n");
2649 LOAD_ERROR_EXIT(bp, load_error3);
2650 }
2651
2652 /* set up the rest of the queues */
2653 for_each_nondefault_eth_queue(bp, i) {
2654 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2655 if (rc) {
2656 BNX2X_ERR("Queue setup failed\n");
2657 LOAD_ERROR_EXIT(bp, load_error3);
2658 }
2659 }
2660
2661 /* setup rss */
2662 rc = bnx2x_init_rss_pf(bp);
2663 if (rc) {
2664 BNX2X_ERR("PF RSS init failed\n");
2665 LOAD_ERROR_EXIT(bp, load_error3);
2666 }
Ariel Elior8d9ac292013-01-01 05:22:27 +00002667
2668 } else { /* vf */
2669 for_each_eth_queue(bp, i) {
2670 rc = bnx2x_vfpf_setup_q(bp, i);
2671 if (rc) {
2672 BNX2X_ERR("Queue setup failed\n");
2673 LOAD_ERROR_EXIT(bp, load_error3);
2674 }
2675 }
Merav Sicron51c1a582012-03-18 10:33:38 +00002676 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002677
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002678	/* Now that clients are configured we are ready to work */
2679 bp->state = BNX2X_STATE_OPEN;
2680
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002681 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002682 if (IS_PF(bp))
2683 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002684 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002685 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2686 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002687 if (rc) {
2688 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002689 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002690 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002691
Ariel Eliorad5afc82013-01-01 05:22:26 +00002692 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002693 bnx2x_update_max_mf_config(bp, bp->pending_max);
2694 bp->pending_max = 0;
2695 }
2696
Ariel Eliorad5afc82013-01-01 05:22:26 +00002697 if (bp->port.pmf) {
2698 rc = bnx2x_initial_phy_init(bp, load_mode);
2699 if (rc)
2700 LOAD_ERROR_EXIT(bp, load_error3);
2701 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002702 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002703
2704 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002705
2706 /* Initialize Rx filter. */
2707 netif_addr_lock_bh(bp->dev);
2708 bnx2x_set_rx_mode(bp->dev);
2709 netif_addr_unlock_bh(bp->dev);
2710
2711 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002712 switch (load_mode) {
2713 case LOAD_NORMAL:
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002714		/* Tx queues should only be re-enabled */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002715 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002716 break;
2717
2718 case LOAD_OPEN:
2719 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002720 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002721 break;
2722
2723 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002724 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002725 bp->state = BNX2X_STATE_DIAG;
2726 break;
2727
2728 default:
2729 break;
2730 }
2731
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002732 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002733 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002734 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002735 bnx2x__link_status_update(bp);
2736
2737 /* start the timer */
2738 mod_timer(&bp->timer, jiffies + bp->current_interval);
2739
Merav Sicron55c11942012-11-07 00:45:48 +00002740 if (CNIC_ENABLED(bp))
2741 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002742
Ariel Eliorad5afc82013-01-01 05:22:26 +00002743 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2744		/* mark driver as loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002745 u32 val;
2746 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2747 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2748 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2749 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2750 }
2751
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002752 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002753 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002754 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002755 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002756 return -EBUSY;
2757 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002758
Barak Witkowski98768792012-06-19 07:48:31 +00002759 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2760 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2761 bnx2x_dcbx_init(bp, false);
2762
Merav Sicron55c11942012-11-07 00:45:48 +00002763 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2764
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002765 return 0;
2766
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002767#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002768load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002769 if (IS_PF(bp)) {
2770 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002771
Ariel Eliorad5afc82013-01-01 05:22:26 +00002772 /* Clean queueable objects */
2773 bnx2x_squeeze_objects(bp);
2774 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002775
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002776 /* Free SKBs, SGEs, TPA pool and driver internals */
2777 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002778 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002779 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002780
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002781 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002782 bnx2x_free_irq(bp);
2783load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002784 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002785 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2786 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2787 }
2788
2789 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002790load_error1:
2791 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002792 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002793
Ariel Elior889b9af2012-01-26 06:01:51 +00002794 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002795 if (IS_PF(bp))
2796 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002797load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002798 bnx2x_free_fp_mem(bp);
2799 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002800 bnx2x_free_mem(bp);
2801
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002802 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002803#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002804}
2805
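/* Wait until every CoS ring of every Tx queue has been drained by the
 * chip. The first error returned by bnx2x_clean_tx_queue() (e.g. a
 * drain timeout) is propagated to the caller.
 */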
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002806int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002807{
2808	int rc = 0, cos, i;	/* rc must be signed: clean_tx_queue may return -EBUSY */
2809
2810 /* Wait until tx fastpath tasks complete */
2811 for_each_tx_queue(bp, i) {
2812 struct bnx2x_fastpath *fp = &bp->fp[i];
2813
2814 for_each_cos_in_tx_queue(fp, cos)
2815 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2816 if (rc)
2817 return rc;
2818 }
2819 return 0;
2820}
2821
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002822/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002823int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002824{
2825 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002826 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002827
Merav Sicron55c11942012-11-07 00:45:48 +00002828 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2829
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002830	/* mark driver as unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002831 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002832 u32 val;
2833 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2834 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2835 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2836 }
2837
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002838 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002839 (bp->state == BNX2X_STATE_CLOSED ||
2840 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002841 /* We can get here if the driver has been unloaded
2842 * during parity error recovery and is either waiting for a
2843 * leader to complete or for other functions to unload and
2844 * then ifdown has been issued. In this case we want to
2845		 * unload and let other functions complete a recovery
2846 * process.
2847 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002848 bp->recovery_state = BNX2X_RECOVERY_DONE;
2849 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002850 bnx2x_release_leader_lock(bp);
2851 smp_mb();
2852
Merav Sicron51c1a582012-03-18 10:33:38 +00002853 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2854 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002855 return -EINVAL;
2856 }
2857
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002858	/* Nothing to do during unload if the previous bnx2x_nic_load()
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002859	 * has not completed successfully - all resources are released.
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002860	 *
2861	 * we can get here only after an unsuccessful ndo_* callback, during
2862	 * which the dev->IFF_UP flag is still on.
2863	 */
2864 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2865 return 0;
2866
2867	/* It's important to set bp->state to a value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002868 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2869 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2870 */
2871 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2872 smp_mb();
2873
Ariel Elior78c3bcc2013-06-20 17:39:08 +03002874 /* indicate to VFs that the PF is going down */
2875 bnx2x_iov_channel_down(bp);
2876
Merav Sicron55c11942012-11-07 00:45:48 +00002877 if (CNIC_LOADED(bp))
2878 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2879
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002880 /* Stop Tx */
2881 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002882 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002883
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002884 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002885
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002886 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002887
Ariel Eliorad5afc82013-01-01 05:22:26 +00002888 if (IS_PF(bp)) {
2889 /* Set ALWAYS_ALIVE bit in shmem */
2890 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2891 bnx2x_drv_pulse(bp);
2892 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2893 bnx2x_save_statistics(bp);
2894 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002895
Ariel Eliorad5afc82013-01-01 05:22:26 +00002896 /* wait till consumers catch up with producers in all queues */
2897 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002898
Ariel Elior9b176b62013-01-01 05:22:28 +00002899	/* if VF, indicate to the PF that this function is going down
2900	 * (the PF will delete sp elements and clear initializations)
2901	 */
2902 if (IS_VF(bp))
2903 bnx2x_vfpf_close_vf(bp);
2904 else if (unload_mode != UNLOAD_RECOVERY)
2905		/* if this is a normal/close unload, we need to clean up the chip */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002906 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002907 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002908 /* Send the UNLOAD_REQUEST to the MCP */
2909 bnx2x_send_unload_req(bp, unload_mode);
2910
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002911 /* Prevent transactions to host from the functions on the
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002912 * engine that doesn't reset global blocks in case of global
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002913 * attention once global blocks are reset and gates are opened
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002914		 * (the engine whose leader will perform the recovery
2915 * last).
2916 */
2917 if (!CHIP_IS_E1x(bp))
2918 bnx2x_pf_disable(bp);
2919
2920 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002921 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002922 /* Delete all NAPI objects */
2923 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002924 if (CNIC_LOADED(bp))
2925 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002926 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002927 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002928
2929 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002930 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002931 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002932
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002933 /*
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002934 * At this stage no more interrupts will arrive so we may safely clean
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002935 * the queueable objects here in case they failed to get cleaned so far.
2936 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002937 if (IS_PF(bp))
2938 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002939
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002940 /* There should be no more pending SP commands at this stage */
2941 bp->sp_state = 0;
2942
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002943 bp->port.pmf = 0;
2944
2945 /* Free SKBs, SGEs, TPA pool and driver internals */
2946 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002947 if (CNIC_LOADED(bp))
2948 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002949 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002950 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002951
Ariel Eliorad5afc82013-01-01 05:22:26 +00002952 bnx2x_free_fp_mem(bp);
2953 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00002954 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002955
Ariel Eliorad5afc82013-01-01 05:22:26 +00002956 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002957 if (CNIC_LOADED(bp))
2958 bnx2x_free_mem_cnic(bp);
Yuval Mintz2f7a3122013-04-24 01:45:01 +00002959 bnx2x_free_mem(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002960 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002961 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00002962 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002963
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002964 /* Check if there are pending parity attentions. If there are - set
2965 * RECOVERY_IN_PROGRESS.
2966 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002967 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002968 bnx2x_set_reset_in_progress(bp);
2969
2970 /* Set RESET_IS_GLOBAL if needed */
2971 if (global)
2972 bnx2x_set_reset_global(bp);
2973 }
2974
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002975 /* The last driver must disable a "close the gate" if there is no
2976 * parity attention or "process kill" pending.
2977 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002978 if (IS_PF(bp) &&
2979 !bnx2x_clear_pf_load(bp) &&
2980 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002981 bnx2x_disable_close_the_gate(bp);
2982
Merav Sicron55c11942012-11-07 00:45:48 +00002983 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2984
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002985 return 0;
2986}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002987
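/* Move the PCI function between D0 and D3hot through the PMCSR register
 * of the PCI PM capability; when WoL is enabled, PME generation stays
 * armed in D3hot. Emulation/FPGA chips and shared devices stay in D0.
 */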
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002988int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2989{
2990 u16 pmcsr;
2991
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002992 /* If there is no power capability, silently succeed */
2993 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002994 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002995 return 0;
2996 }
2997
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002998 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2999
3000 switch (state) {
3001 case PCI_D0:
3002 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3003 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3004 PCI_PM_CTRL_PME_STATUS));
3005
3006 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3007 /* delay required during transition out of D3hot */
3008 msleep(20);
3009 break;
3010
3011 case PCI_D3hot:
3012		/* If there are other clients above, don't
3013		 * shut down the power */
3014 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3015 return 0;
3016 /* Don't shut down the power for emulation and FPGA */
3017 if (CHIP_REV_IS_SLOW(bp))
3018 return 0;
3019
3020 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
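		/* 3 is the D3hot encoding of the PMCSR power-state field */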
3021 pmcsr |= 3;
3022
3023 if (bp->wol)
3024 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3025
3026 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3027 pmcsr);
3028
3029 /* No more memory access after this point until
3030 * device is brought back to D0.
3031 */
3032 break;
3033
3034 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003035 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003036 return -EINVAL;
3037 }
3038 return 0;
3039}
3040
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003041/*
3042 * net_device service functions
3043 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003044int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003045{
3046 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003047 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003048 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3049 napi);
3050 struct bnx2x *bp = fp->bp;
3051
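	/* Service all CoS Tx rings first (Tx completion work is not counted
	 * against the budget), then Rx up to the remaining budget; interrupts
	 * are re-enabled only after re-checking for new work (see the rmb()
	 * comment below).
	 */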
3052 while (1) {
3053#ifdef BNX2X_STOP_ON_ERROR
3054 if (unlikely(bp->panic)) {
3055 napi_complete(napi);
3056 return 0;
3057 }
3058#endif
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003059 if (!bnx2x_fp_lock_napi(fp))
3060 return work_done;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003061
Ariel Elior6383c0b2011-07-14 08:31:57 +00003062 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003063 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3064 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003065
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003066 if (bnx2x_has_rx_work(fp)) {
3067 work_done += bnx2x_rx_int(fp, budget - work_done);
3068
3069 /* must not complete if we consumed full budget */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003070 if (work_done >= budget) {
3071 bnx2x_fp_unlock_napi(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003072 break;
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003073 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003074 }
3075
3076 /* Fall out from the NAPI loop if needed */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003077 if (!bnx2x_fp_unlock_napi(fp) &&
3078 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003079
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003080 /* No need to update SB for FCoE L2 ring as long as
3081 * it's connected to the default SB and the SB
3082 * has been updated when NAPI was scheduled.
3083 */
3084 if (IS_FCOE_FP(fp)) {
3085 napi_complete(napi);
3086 break;
3087 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003088 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003089 /* bnx2x_has_rx_work() reads the status block,
3090 * thus we need to ensure that status block indices
3091 * have been actually read (bnx2x_update_fpsb_idx)
3092 * prior to this check (bnx2x_has_rx_work) so that
3093 * we won't write the "newer" value of the status block
3094 * to IGU (if there was a DMA right after
3095 * bnx2x_has_rx_work and if there is no rmb, the memory
3096 * reading (bnx2x_update_fpsb_idx) may be postponed
3097 * to right before bnx2x_ack_sb). In this case there
3098 * will never be another interrupt until there is
3099 * another update of the status block, while there
3100 * is still unhandled work.
3101 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003102 rmb();
3103
3104 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3105 napi_complete(napi);
3106 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003107 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003108 "Update index to %d\n", fp->fp_hc_idx);
3109 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3110 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003111 IGU_INT_ENABLE, 1);
3112 break;
3113 }
3114 }
3115 }
3116
3117 return work_done;
3118}
3119
Cong Wange0d10952013-08-01 11:10:25 +08003120#ifdef CONFIG_NET_RX_BUSY_POLL
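/* Busy-poll (low-latency) Rx: called from process context to pull a few
 * packets straight off the ring, bypassing the NAPI softirq path. Bails
 * out when TPA/GRO is enabled, since aggregation stays on the NAPI path.
 */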
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003121/* must be called with local_bh_disable()d */
3122int bnx2x_low_latency_recv(struct napi_struct *napi)
3123{
3124 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3125 napi);
3126 struct bnx2x *bp = fp->bp;
3127 int found = 0;
3128
3129 if ((bp->state == BNX2X_STATE_CLOSED) ||
3130 (bp->state == BNX2X_STATE_ERROR) ||
3131 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3132 return LL_FLUSH_FAILED;
3133
3134 if (!bnx2x_fp_lock_poll(fp))
3135 return LL_FLUSH_BUSY;
3136
Dmitry Kravkov75b29452013-06-19 01:36:05 +03003137 if (bnx2x_has_rx_work(fp))
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003138 found = bnx2x_rx_int(fp, 4);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003139
3140 bnx2x_fp_unlock_poll(fp);
3141
3142 return found;
3143}
3144#endif
3145
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003146/* we split the first BD into headers and data BDs
3147 * to ease the pain of our fellow microcode engineers
3148 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003149 */
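/* Resulting chain for a TSO packet (one DMA mapping shared by both BDs):
 *
 *   start BD (hlen bytes of headers) -> PBD -> data BD (the rest) -> frags
 *
 * The data BD is flagged BNX2X_TSO_SPLIT_BD so that unmap skips it.
 */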
Dmitry Kravkov91226792013-03-11 05:17:52 +00003150static u16 bnx2x_tx_split(struct bnx2x *bp,
3151 struct bnx2x_fp_txdata *txdata,
3152 struct sw_tx_bd *tx_buf,
3153 struct eth_tx_start_bd **tx_bd, u16 hlen,
3154 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003155{
3156 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3157 struct eth_tx_bd *d_tx_bd;
3158 dma_addr_t mapping;
3159 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3160
3161 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003162 h_tx_bd->nbytes = cpu_to_le16(hlen);
3163
Dmitry Kravkov91226792013-03-11 05:17:52 +00003164 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3165 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003166
3167 /* now get a new data BD
3168 * (after the pbd) and fill it */
3169 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003170 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003171
3172 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3173 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3174
3175 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3176 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3177 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3178
3179 /* this marks the BD as one that has no individual mapping */
3180 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3181
3182 DP(NETIF_MSG_TX_QUEUED,
3183 "TSO split data size is %d (%x:%x)\n",
3184 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3185
3186 /* update tx_bd */
3187 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3188
3189 return bd_prod;
3190}
3191
Yuval Mintz86564c32013-01-23 03:21:50 +00003192#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3193#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
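/* Adjust a checksum that was computed starting 'fix' bytes away from the
 * real transport header: fold the extra bytes out of (fix > 0) or the
 * missing bytes into (fix < 0) the sum, then byte-swap it for the BD.
 */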
Dmitry Kravkov91226792013-03-11 05:17:52 +00003194static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003195{
Yuval Mintz86564c32013-01-23 03:21:50 +00003196 __sum16 tsum = (__force __sum16) csum;
3197
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003198 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003199 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3200 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003201
3202 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003203 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3204 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003205
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003206 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003207}
3208
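/* Classify the skb into XMIT_* flags: checksum offload family (IPv4/IPv6,
 * TCP/UDP, plus the encapsulated variants on 57712 and newer) and GSO
 * type. The flags select the parsing BD layout in bnx2x_start_xmit().
 */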
Dmitry Kravkov91226792013-03-11 05:17:52 +00003209static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003210{
3211 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003212 __u8 prot = 0;
3213 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003214
3215 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003216 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003217
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003218 protocol = vlan_get_protocol(skb);
3219 if (protocol == htons(ETH_P_IPV6)) {
3220 rc = XMIT_CSUM_V6;
3221 prot = ipv6_hdr(skb)->nexthdr;
3222 } else {
3223 rc = XMIT_CSUM_V4;
3224 prot = ip_hdr(skb)->protocol;
3225 }
3226
3227 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3228 if (inner_ip_hdr(skb)->version == 6) {
3229 rc |= XMIT_CSUM_ENC_V6;
3230 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003231 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003232 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003233 rc |= XMIT_CSUM_ENC_V4;
3234 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003235 rc |= XMIT_CSUM_TCP;
3236 }
3237 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003238 if (prot == IPPROTO_TCP)
3239 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003240
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003241 if (skb_is_gso_v6(skb)) {
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003242 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003243 if (rc & XMIT_CSUM_ENC)
3244 rc |= XMIT_GSO_ENC_V6;
3245 } else if (skb_is_gso(skb)) {
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003246 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003247 if (rc & XMIT_CSUM_ENC)
3248 rc |= XMIT_GSO_ENC_V4;
3249 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003250
3251 return rc;
3252}
3253
3254#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3255/* check if the packet requires linearization (packet is too fragmented);
3256 * no need to check fragmentation if page size > 8K (there will be no
3257 * violation of FW restrictions) */
3258static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3259 u32 xmit_type)
3260{
3261 int to_copy = 0;
3262 int hlen = 0;
3263 int first_bd_sz = 0;
3264
3265 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3266 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3267
3268 if (xmit_type & XMIT_GSO) {
3269 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3270 /* Check if LSO packet needs to be copied:
3271 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3272 int wnd_size = MAX_FETCH_BD - 3;
3273 /* Number of windows to check */
3274 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3275 int wnd_idx = 0;
3276 int frag_idx = 0;
3277 u32 wnd_sum = 0;
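			/* The FW restriction being checked: every window of
			 * wnd_size consecutive BDs must carry at least one
			 * MSS worth of data, otherwise the chip cannot fetch
			 * a full segment and the skb must be linearized.
			 */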
3278
3279 /* Headers length */
3280 hlen = (int)(skb_transport_header(skb) - skb->data) +
3281 tcp_hdrlen(skb);
3282
3283			/* Amount of data (w/o headers) on the linear part of the SKB */
3284 first_bd_sz = skb_headlen(skb) - hlen;
3285
3286 wnd_sum = first_bd_sz;
3287
3288 /* Calculate the first sum - it's special */
3289 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3290 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003291 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003292
3293 /* If there was data on linear skb data - check it */
3294 if (first_bd_sz > 0) {
3295 if (unlikely(wnd_sum < lso_mss)) {
3296 to_copy = 1;
3297 goto exit_lbl;
3298 }
3299
3300 wnd_sum -= first_bd_sz;
3301 }
3302
3303 /* Others are easier: run through the frag list and
3304 check all windows */
3305 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3306 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003307 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003308
3309 if (unlikely(wnd_sum < lso_mss)) {
3310 to_copy = 1;
3311 break;
3312 }
3313 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003314 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003315 }
3316 } else {
3317			/* in the non-LSO case a too-fragmented packet should
3318			 * always be linearized */
3319 to_copy = 1;
3320 }
3321 }
3322
3323exit_lbl:
3324 if (unlikely(to_copy))
3325 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003326 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003327 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3328 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3329
3330 return to_copy;
3331}
3332#endif
3333
Dmitry Kravkov91226792013-03-11 05:17:52 +00003334static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3335 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003336{
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003337 struct ipv6hdr *ipv6;
3338
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003339 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3340 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3341 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003342
3343 if (xmit_type & XMIT_GSO_ENC_V6)
3344 ipv6 = inner_ipv6_hdr(skb);
3345 else if (xmit_type & XMIT_GSO_V6)
3346 ipv6 = ipv6_hdr(skb);
3347 else
3348 ipv6 = NULL;
3349
3350 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003351 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003352}
3353
3354/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003355 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003356 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003357 * @skb: packet skb
3358 * @pbd: parse BD
3359 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003360 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003361static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3362 struct eth_tx_parse_bd_e1x *pbd,
Yuval Mintz057cf652013-05-19 04:41:01 +00003363 struct eth_tx_start_bd *tx_start_bd,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003364 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003365{
3366 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003367 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003368 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003369
3370 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003371 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003372 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003373 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3374 ip_hdr(skb)->daddr,
3375 0, IPPROTO_TCP, 0));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003376
Yuval Mintz057cf652013-05-19 04:41:01 +00003377 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3378 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3379 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003380 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003381 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3382 &ipv6_hdr(skb)->daddr,
3383 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003384 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003385
Yuval Mintz86564c32013-01-23 03:21:50 +00003386 pbd->global_data |=
3387 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003388}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003389
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003390/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003391 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3392 *
3393 * @bp: driver handle
3394 * @skb: packet skb
3395 * @parsing_data: data to be updated
3396 * @xmit_type: xmit flags
3397 *
3398 * 57712/578xx related, when skb has encapsulation
3399 */
3400static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3401 u32 *parsing_data, u32 xmit_type)
3402{
3403 *parsing_data |=
3404 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3405 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3406 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3407
3408 if (xmit_type & XMIT_CSUM_TCP) {
3409 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3410 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3411 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3412
3413 return skb_inner_transport_header(skb) +
3414 inner_tcp_hdrlen(skb) - skb->data;
3415 }
3416
3417 /* We support checksum offload for TCP and UDP only.
3418 * No need to pass the UDP header length - it's a constant.
3419 */
3420 return skb_inner_transport_header(skb) +
3421 sizeof(struct udphdr) - skb->data;
3422}
3423
3424/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003425 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003426 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003427 * @bp: driver handle
3428 * @skb: packet skb
3429 * @parsing_data: data to be updated
3430 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003431 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003432 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003433 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003434static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3435 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003436{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003437 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003438 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003439 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3440 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003441
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003442 if (xmit_type & XMIT_CSUM_TCP) {
3443 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3444 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3445 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003446
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003447 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003448 }
3449 /* We support checksum offload for TCP and UDP only.
3450 * No need to pass the UDP header length - it's a constant.
3451 */
3452 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003453}
3454
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003455/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003456static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3457 struct eth_tx_start_bd *tx_start_bd,
3458 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003459{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003460 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3461
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003462 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003463 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003464
3465 if (!(xmit_type & XMIT_CSUM_TCP))
3466 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003467}
3468
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003469/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003470 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003471 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003472 * @bp: driver handle
3473 * @skb: packet skb
3474 * @pbd: parse BD to be updated
3475 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003476 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003477static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3478 struct eth_tx_parse_bd_e1x *pbd,
3479 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003480{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003481 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003482
3483 /* for now NS flag is not used in Linux */
3484 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003485 cpu_to_le16(hlen |
3486 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3487 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003488
3489 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003490 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003491
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003492 hlen += pbd->ip_hlen_w;
3493
3494 /* We support checksum offload for TCP and UDP only */
3495 if (xmit_type & XMIT_CSUM_TCP)
3496 hlen += tcp_hdrlen(skb) / 2;
3497 else
3498 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003499
3500 pbd->total_hlen_w = cpu_to_le16(hlen);
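	/* the PBD counts header length in 16-bit words; convert back to
	 * bytes for the value returned to the caller
	 */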
3501	hlen = hlen * 2;
3502
3503 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003504 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003505
3506 } else {
3507 s8 fix = SKB_CS_OFF(skb); /* signed! */
3508
3509 DP(NETIF_MSG_TX_QUEUED,
3510 "hlen %d fix %d csum before fix %x\n",
3511 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3512
3513 /* HW bug: fixup the CSUM */
3514 pbd->tcp_pseudo_csum =
3515 bnx2x_csum_fix(skb_transport_header(skb),
3516 SKB_CS(skb), fix);
3517
3518 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3519 pbd->tcp_pseudo_csum);
3520 }
3521
3522 return hlen;
3523}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003524
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003525static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3526 struct eth_tx_parse_bd_e2 *pbd_e2,
3527 struct eth_tx_parse_2nd_bd *pbd2,
3528 u16 *global_data,
3529 u32 xmit_type)
3530{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003531 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003532 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003533
Dmitry Kravkove287a752013-03-21 15:38:24 +00003534 /* from outer IP to transport */
3535 hlen_w = (skb_inner_transport_header(skb) -
3536 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003537
3538 /* transport len */
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003539 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003540
Dmitry Kravkove287a752013-03-21 15:38:24 +00003541 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003542
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003543 /* outer IP header info */
3544 if (xmit_type & XMIT_CSUM_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003545 struct iphdr *iph = ip_hdr(skb);
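		/* FW rewrites tot_len and frag_off per segment, so hand it
		 * the outer IP checksum with those two fields backed out
		 * (arithmetic done in one's-complement space).
		 */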
Dmitry Kravkov1b4fc0e2013-07-11 15:48:21 +03003546 u32 csum = (__force u32)(~iph->check) -
3547 (__force u32)iph->tot_len -
3548 (__force u32)iph->frag_off;
Yuval Mintzc957d092013-06-25 08:50:11 +03003549
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003550 pbd2->fw_ip_csum_wo_len_flags_frag =
Yuval Mintzc957d092013-06-25 08:50:11 +03003551 bswab16(csum_fold((__force __wsum)csum));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003552 } else {
3553 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003554 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003555 }
3556
3557 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3558
3559 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3560
3561 if (xmit_type & XMIT_GSO_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003562 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003563
3564 pbd_e2->data.tunnel_data.pseudo_csum =
3565 bswab16(~csum_tcpudp_magic(
3566 inner_ip_hdr(skb)->saddr,
3567 inner_ip_hdr(skb)->daddr,
3568 0, IPPROTO_TCP, 0));
3569
3570 outerip_len = ip_hdr(skb)->ihl << 1;
3571 } else {
3572 pbd_e2->data.tunnel_data.pseudo_csum =
3573 bswab16(~csum_ipv6_magic(
3574 &inner_ipv6_hdr(skb)->saddr,
3575 &inner_ipv6_hdr(skb)->daddr,
3576 0, IPPROTO_TCP, 0));
3577 }
3578
3579 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3580
3581 *global_data |=
3582 outerip_off |
3583 (!!(xmit_type & XMIT_CSUM_V6) <<
3584 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3585 (outerip_len <<
3586 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3587 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3588 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003589
3590 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3591 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3592 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3593 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003594}
3595
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003596/* called with netif_tx_lock
3597 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3598 * netif_wake_queue()
3599 */
3600netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3601{
3602 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003603
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003604 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003605 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003606 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003607 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003608 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003609 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003610 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003611 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003612 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003613 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003614 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003615 dma_addr_t mapping;
3616 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3617 int i;
3618 u8 hlen = 0;
3619 __le16 pkt_size = 0;
3620 struct ethhdr *eth;
3621 u8 mac_type = UNICAST_ADDRESS;
3622
3623#ifdef BNX2X_STOP_ON_ERROR
3624 if (unlikely(bp->panic))
3625 return NETDEV_TX_BUSY;
3626#endif
3627
Ariel Elior6383c0b2011-07-14 08:31:57 +00003628 txq_index = skb_get_queue_mapping(skb);
3629 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003630
Merav Sicron55c11942012-11-07 00:45:48 +00003631 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003632
Merav Sicron65565882012-06-19 07:48:26 +00003633 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003634
3635 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003636 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003637 txq_index, fp_index, txdata_index); */
3638
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003639 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003640 DP(NETIF_MSG_TX_QUEUED,
3641 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003642 txdata->cid, fp_index, txdata_index, txdata, fp); */
3643
3644 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003645 skb_shinfo(skb)->nr_frags +
3646 BDS_PER_TX_PKT +
3647 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003648 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003649 if (txdata->tx_ring_size == 0) {
3650 struct bnx2x_eth_q_stats *q_stats =
3651 bnx2x_fp_qstats(bp, txdata->parent_fp);
3652 q_stats->driver_filtered_tx_pkt++;
3653 dev_kfree_skb(skb);
3654 return NETDEV_TX_OK;
3655 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003656 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3657 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003658 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003659
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003660 return NETDEV_TX_BUSY;
3661 }
3662
Merav Sicron51c1a582012-03-18 10:33:38 +00003663 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003664 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003665 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003666 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3667 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003668
3669 eth = (struct ethhdr *)skb->data;
3670
3671 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3672 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3673 if (is_broadcast_ether_addr(eth->h_dest))
3674 mac_type = BROADCAST_ADDRESS;
3675 else
3676 mac_type = MULTICAST_ADDRESS;
3677 }
3678
Dmitry Kravkov91226792013-03-11 05:17:52 +00003679#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003680 /* First, check if we need to linearize the skb (due to FW
3681 restrictions). No need to check fragmentation if page size > 8K
3682 (there will be no violation to FW restrictions) */
3683 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3684 /* Statistics of linearization */
3685 bp->lin_cnt++;
3686 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003687 DP(NETIF_MSG_TX_QUEUED,
3688 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003689 dev_kfree_skb_any(skb);
3690 return NETDEV_TX_OK;
3691 }
3692 }
3693#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003694 /* Map skb linear data for DMA */
3695 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3696 skb_headlen(skb), DMA_TO_DEVICE);
3697 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003698 DP(NETIF_MSG_TX_QUEUED,
3699 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003700 dev_kfree_skb_any(skb);
3701 return NETDEV_TX_OK;
3702 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003703 /*
3704 Please read carefully. First we use one BD which we mark as start,
3705 then we have a parsing info BD (used for TSO or xsum),
3706 and only then we have the rest of the TSO BDs.
3707 (don't forget to mark the last one as last,
3708 and to unmap only AFTER you write to the BD ...)
3709	   And above all, all PBD sizes are in words - NOT DWORDS!
3710 */
3711
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003712 /* get current pkt produced now - advance it just before sending packet
3713 * since mapping of pages may fail and cause packet to be dropped
3714 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003715 pkt_prod = txdata->tx_pkt_prod;
3716 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003717
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003718 /* get a tx_buf and first BD
3719 * tx_start_bd may be changed during SPLIT,
3720 * but first_bd will always stay first
3721 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003722 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3723 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003724 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003725
3726 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003727
Dmitry Kravkov91226792013-03-11 05:17:52 +00003728 /* header nbd: indirectly zero other flags! */
3729 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003730
3731 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003732 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003733 tx_buf->skb = skb;
3734 tx_buf->flags = 0;
3735
3736 DP(NETIF_MSG_TX_QUEUED,
3737 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003738 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003739
Jesse Grosseab6d182010-10-20 13:56:03 +00003740 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003741 tx_start_bd->vlan_or_ethertype =
3742 cpu_to_le16(vlan_tx_tag_get(skb));
3743 tx_start_bd->bd_flags.as_bitfield |=
3744 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003745 } else {
3746 /* when transmitting in a vf, start bd must hold the ethertype
3747 * for fw to enforce it
3748 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003749 if (IS_VF(bp))
Ariel Eliordc1ba592013-01-01 05:22:30 +00003750 tx_start_bd->vlan_or_ethertype =
3751 cpu_to_le16(ntohs(eth->h_proto));
Dmitry Kravkov91226792013-03-11 05:17:52 +00003752 else
Ariel Eliordc1ba592013-01-01 05:22:30 +00003753 /* used by FW for packet accounting */
3754 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003755 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003756
Dmitry Kravkov91226792013-03-11 05:17:52 +00003757 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3758
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003759 /* turn on parsing and get a BD */
3760 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003761
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003762 if (xmit_type & XMIT_CSUM)
3763 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003764
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003765 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003766 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003767 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003768
3769 if (xmit_type & XMIT_CSUM_ENC) {
3770 u16 global_data = 0;
3771
3772 /* Set PBD in enc checksum offload case */
3773 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3774 &pbd_e2_parsing_data,
3775 xmit_type);
3776
3777 /* turn on 2nd parsing and get a BD */
3778 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3779
3780 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3781
3782 memset(pbd2, 0, sizeof(*pbd2));
3783
3784 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3785 (skb_inner_network_header(skb) -
3786 skb->data) >> 1;
3787
3788 if (xmit_type & XMIT_GSO_ENC)
3789 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3790 &global_data,
3791 xmit_type);
3792
3793 pbd2->global_data = cpu_to_le16(global_data);
3794
3795			/* add an additional parse BD indication to the start BD */
3796 SET_FLAG(tx_start_bd->general_data,
3797 ETH_TX_START_BD_PARSE_NBDS, 1);
3798 /* set encapsulation flag in start BD */
3799 SET_FLAG(tx_start_bd->general_data,
3800 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3801 nbd++;
3802 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00003803 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003804 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3805 &pbd_e2_parsing_data,
3806 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003807 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00003808
Dmitry Kravkov91226792013-03-11 05:17:52 +00003809		/* Add the MACs to the parsing BD - this is a VF */
3810 if (IS_VF(bp)) {
3811 /* override GRE parameters in BD */
3812 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3813 &pbd_e2->data.mac_addr.src_mid,
3814 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003815 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003816
3817 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3818 &pbd_e2->data.mac_addr.dst_mid,
3819 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003820 eth->h_dest);
3821 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003822
3823 SET_FLAG(pbd_e2_parsing_data,
3824 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003825 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003826 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003827 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003828 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3829 /* Set PBD in checksum offload case */
3830 if (xmit_type & XMIT_CSUM)
3831 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003832
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003833 SET_FLAG(global_data,
3834 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3835 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003836 }
3837
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003838 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003839 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3840 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003841 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3842 pkt_size = tx_start_bd->nbytes;
3843
Merav Sicron51c1a582012-03-18 10:33:38 +00003844 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003845 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003846 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003847 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003848 tx_start_bd->bd_flags.as_bitfield,
3849 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003850
3851 if (xmit_type & XMIT_GSO) {
3852
3853 DP(NETIF_MSG_TX_QUEUED,
3854 "TSO packet len %d hlen %d total len %d tso size %d\n",
3855 skb->len, hlen, skb_headlen(skb),
3856 skb_shinfo(skb)->gso_size);
3857
3858 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3859
Dmitry Kravkov91226792013-03-11 05:17:52 +00003860 if (unlikely(skb_headlen(skb) > hlen)) {
3861 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003862 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3863 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003864 bd_prod);
3865 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003866 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003867 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3868 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003869 else
Yuval Mintz44dbc782013-06-03 02:59:57 +00003870 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003871 }
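	/* GSO wrap-up: the MSS and related offsets are carried in
	 * pbd_e2_parsing_data on E2+ chips (written into the parsing BD
	 * just below) or directly in the E1x parsing BD fields (lso_mss,
	 * tcp_send_seq, ... as dumped in the debug prints further down).
	 */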
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003872
3873 /* Set the PBD's parsing_data field if not zero
3874 * (for the chips newer than 57711).
3875 */
3876 if (pbd_e2_parsing_data)
3877 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3878
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003879 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3880
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003881 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003882 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3883 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3884
Eric Dumazet9e903e02011-10-18 21:00:24 +00003885 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3886 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003887 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003888 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003889
Merav Sicron51c1a582012-03-18 10:33:38 +00003890 DP(NETIF_MSG_TX_QUEUED,
3891 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003892
3893 /* we need to unmap all buffers already mapped
3894 * for this SKB;
3895 * first_bd->nbd needs to be properly updated
3896 * before the call to bnx2x_free_tx_pkt
3897 */
3898 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003899 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003900 TX_BD(txdata->tx_pkt_prod),
3901 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003902 return NETDEV_TX_OK;
3903 }
3904
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003905 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003906 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003907 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003908 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003909
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003910 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3911 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003912 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3913 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003914 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003915
3916 DP(NETIF_MSG_TX_QUEUED,
3917 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3918 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3919 le16_to_cpu(tx_data_bd->nbytes));
3920 }
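	/* Each page fragment above received its own data BD, with
	 * total_pkt_bd left pointing at the first of them so that
	 * total_pkt_bytes can be set once pkt_size has accumulated all
	 * fragment lengths.
	 */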
3921
3922 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3923
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003924 /* update with actual num BDs */
3925 first_bd->nbd = cpu_to_le16(nbd);
3926
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003927 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3928
3929 /* now send a tx doorbell, counting the next BD
3930 * if the packet contains or ends with it
3931 */
3932 if (TX_BD_POFF(bd_prod) < nbd)
3933 nbd++;
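	/* The TX ring, like the RX rings chained below via
	 * bnx2x_set_next_page_*(), apparently reserves the last
	 * descriptor of each page as a next-page pointer, so a packet
	 * whose BDs cross a page boundary consumes one extra slot; that
	 * is what the TX_BD_POFF() check accounts for.
	 */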
3934
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003935 /* total_pkt_bytes should be set on the first data BD if
3936 * it's not an LSO packet and there is more than one
3937 * data BD. In this case pkt_size is limited by an MTU value.
3938 * However we prefer to set it for an LSO packet (while we don't
3939 * have to) in order to save some CPU cycles in the non-LSO
3940 * case, which we care much more about.
3941 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003942 if (total_pkt_bd != NULL)
3943 total_pkt_bd->total_pkt_bytes = pkt_size;
3944
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003945 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003946 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003947 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003948 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3949 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3950 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3951 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003952 if (pbd_e2)
3953 DP(NETIF_MSG_TX_QUEUED,
3954 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
Dmitry Kravkov91226792013-03-11 05:17:52 +00003955 pbd_e2,
3956 pbd_e2->data.mac_addr.dst_hi,
3957 pbd_e2->data.mac_addr.dst_mid,
3958 pbd_e2->data.mac_addr.dst_lo,
3959 pbd_e2->data.mac_addr.src_hi,
3960 pbd_e2->data.mac_addr.src_mid,
3961 pbd_e2->data.mac_addr.src_lo,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003962 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003963 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3964
Tom Herbert2df1a702011-11-28 16:33:37 +00003965 netdev_tx_sent_queue(txq, skb->len);
3966
Willem de Bruijn8373c572012-04-27 09:04:06 +00003967 skb_tx_timestamp(skb);
3968
Ariel Elior6383c0b2011-07-14 08:31:57 +00003969 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003970 /*
3971 * Make sure that the BD data is updated before updating the producer
3972 * since FW might read the BD right after the producer is updated.
3973 * This is only applicable for weak-ordered memory model archs such
3974 * as IA-64. The following barrier is also mandatory since the FW
3975 * assumes packets must have BDs.
3976 */
3977 wmb();
3978
Ariel Elior6383c0b2011-07-14 08:31:57 +00003979 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003980 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003981
Ariel Elior6383c0b2011-07-14 08:31:57 +00003982 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003983
3984 mmiowb();
3985
Ariel Elior6383c0b2011-07-14 08:31:57 +00003986 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003987
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003988 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003989 netif_tx_stop_queue(txq);
3990
3991 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3992 * ordering of set_bit() in netif_tx_stop_queue() and read of
3993 * fp->bd_tx_cons */
3994 smp_mb();
3995
Barak Witkowski15192a82012-06-19 07:48:28 +00003996 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003997 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003998 netif_tx_wake_queue(txq);
3999 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00004000 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004001
4002 return NETDEV_TX_OK;
4003}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004004
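/* Illustrative BD accounting for the transmit path above (an example
 * derived from the code, not new driver logic): a non-encapsulated TSO
 * skb whose linear area extends past its headers and which carries two
 * page fragments needs
 *
 *	nbd = 2 (start BD + parsing BD)
 *	    + 1 (header/data split done by bnx2x_tx_split())
 *	    + 2 (one data BD per fragment)
 *
 * and the doorbell counts one more BD if the chain crosses a ring-page
 * boundary; an encapsulated packet adds a second parsing BD on top.
 */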
Ariel Elior6383c0b2011-07-14 08:31:57 +00004005/**
4006 * bnx2x_setup_tc - routine to configure net_device for multi tc
4007 *
4008 * @dev: net device to configure
4009 * @num_tc: number of traffic classes to enable
4010 *
4011 * callback connected to the ndo_setup_tc function pointer
4012 */
4013int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4014{
4015 int cos, prio, count, offset;
4016 struct bnx2x *bp = netdev_priv(dev);
4017
4018 /* setup tc must be called under rtnl lock */
4019 ASSERT_RTNL();
4020
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004021 /* no traffic classes requested - reset the tc config and return */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004022 if (!num_tc) {
4023 netdev_reset_tc(dev);
4024 return 0;
4025 }
4026
4027 /* requested to support too many traffic classes */
4028 if (num_tc > bp->max_cos) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00004029 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004030 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004031 return -EINVAL;
4032 }
4033
4034 /* declare amount of supported traffic classes */
4035 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004036 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004037 return -EINVAL;
4038 }
4039
4040 /* configure priority to traffic class mapping */
4041 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4042 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00004043 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4044 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004045 prio, bp->prio_to_cos[prio]);
4046 }
4047
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004048 /* Use this configuration to differentiate tc0 from other COSes
Ariel Elior6383c0b2011-07-14 08:31:57 +00004049 This can be used for ETS or PFC, and saves the effort of setting
4050 up a multi class queue disc or negotiating DCBX with a switch
4051 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00004052 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004053 for (prio = 1; prio < 16; prio++) {
4054 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00004055 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004056 } */
4057
4058 /* configure traffic class to transmission queue mapping */
4059 for (cos = 0; cos < bp->max_cos; cos++) {
4060 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00004061 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004062 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00004063 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4064 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004065 cos, offset, count);
4066 }
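	/* Worked example with illustrative numbers: with 4 ethernet
	 * queues, no CNIC and max_cos == 3, each tc gets count == 4 and
	 * offset == cos * 4, i.e. tc0 -> queues 0-3, tc1 -> queues 4-7,
	 * tc2 -> queues 8-11.
	 */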
4067
4068 return 0;
4069}
4070
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004071/* called with rtnl_lock */
4072int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4073{
4074 struct sockaddr *addr = p;
4075 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004076 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004077
Merav Sicron51c1a582012-03-18 10:33:38 +00004078 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4079 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004080 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004081 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004082
Barak Witkowskia3348722012-04-23 03:04:46 +00004083 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4084 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004085 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004086 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004087 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004088
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004089 if (netif_running(dev)) {
4090 rc = bnx2x_set_eth_mac(bp, false);
4091 if (rc)
4092 return rc;
4093 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004094
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004095 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4096
4097 if (netif_running(dev))
4098 rc = bnx2x_set_eth_mac(bp, true);
4099
4100 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004101}
4102
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004103static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4104{
4105 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4106 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004107 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004108
4109 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00004110
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004111 if (IS_FCOE_IDX(fp_index)) {
4112 memset(sb, 0, sizeof(union host_hc_status_block));
4113 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004114 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004115 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004116 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004117 BNX2X_PCI_FREE(sb->e2_sb,
4118 bnx2x_fp(bp, fp_index,
4119 status_blk_mapping),
4120 sizeof(struct host_hc_status_block_e2));
4121 else
4122 BNX2X_PCI_FREE(sb->e1x_sb,
4123 bnx2x_fp(bp, fp_index,
4124 status_blk_mapping),
4125 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004126 }
Merav Sicron55c11942012-11-07 00:45:48 +00004127
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004128 /* Rx */
4129 if (!skip_rx_queue(bp, fp_index)) {
4130 bnx2x_free_rx_bds(fp);
4131
4132 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4133 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4134 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4135 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4136 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4137
4138 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4139 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4140 sizeof(struct eth_fast_path_rx_cqe) *
4141 NUM_RCQ_BD);
4142
4143 /* SGE ring */
4144 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4145 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4146 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4147 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4148 }
4149
4150 /* Tx */
4151 if (!skip_tx_queue(bp, fp_index)) {
4152 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004153 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004154 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004155
Merav Sicron51c1a582012-03-18 10:33:38 +00004156 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00004157 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004158 fp_index, cos, txdata->cid);
4159
4160 BNX2X_FREE(txdata->tx_buf_ring);
4161 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4162 txdata->tx_desc_mapping,
4163 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4164 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004165 }
4166 /* end of fastpath */
4167}
4168
Merav Sicron55c11942012-11-07 00:45:48 +00004169void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4170{
4171 int i;
4172 for_each_cnic_queue(bp, i)
4173 bnx2x_free_fp_mem_at(bp, i);
4174}
4175
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004176void bnx2x_free_fp_mem(struct bnx2x *bp)
4177{
4178 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00004179 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004180 bnx2x_free_fp_mem_at(bp, i);
4181}
4182
Eric Dumazet1191cb82012-04-27 21:39:21 +00004183static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004184{
4185 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004186 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004187 bnx2x_fp(bp, index, sb_index_values) =
4188 (__le16 *)status_blk.e2_sb->sb.index_values;
4189 bnx2x_fp(bp, index, sb_running_index) =
4190 (__le16 *)status_blk.e2_sb->sb.running_index;
4191 } else {
4192 bnx2x_fp(bp, index, sb_index_values) =
4193 (__le16 *)status_blk.e1x_sb->sb.index_values;
4194 bnx2x_fp(bp, index, sb_running_index) =
4195 (__le16 *)status_blk.e1x_sb->sb.running_index;
4196 }
4197}
4198
Eric Dumazet1191cb82012-04-27 21:39:21 +00004199/* Returns the number of actually allocated BDs */
4200static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4201 int rx_ring_size)
4202{
4203 struct bnx2x *bp = fp->bp;
4204 u16 ring_prod, cqe_ring_prod;
4205 int i, failure_cnt = 0;
4206
4207 fp->rx_comp_cons = 0;
4208 cqe_ring_prod = ring_prod = 0;
4209
4210 /* This routine is called only during fp init, so
4211 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4212 */
4213 for (i = 0; i < rx_ring_size; i++) {
4214 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4215 failure_cnt++;
4216 continue;
4217 }
4218 ring_prod = NEXT_RX_IDX(ring_prod);
4219 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4220 WARN_ON(ring_prod <= (i - failure_cnt));
4221 }
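	/* ring_prod and cqe_ring_prod advance only on successful
	 * allocations, so failed buffers are skipped and tallied in
	 * failure_cnt; the WARN_ON() checks that the BD producer never
	 * falls behind the count of successfully allocated buffers.
	 */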
4222
4223 if (failure_cnt)
4224 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4225 i - failure_cnt, fp->index);
4226
4227 fp->rx_bd_prod = ring_prod;
4228 /* Limit the CQE producer by the CQE ring size */
4229 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4230 cqe_ring_prod);
4231 fp->rx_pkt = fp->rx_calls = 0;
4232
Barak Witkowski15192a82012-06-19 07:48:28 +00004233 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004234
4235 return i - failure_cnt;
4236}
4237
4238static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4239{
4240 int i;
4241
4242 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4243 struct eth_rx_cqe_next_page *nextpg;
4244
4245 nextpg = (struct eth_rx_cqe_next_page *)
4246 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4247 nextpg->addr_hi =
4248 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4249 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4250 nextpg->addr_lo =
4251 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4252 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4253 }
4254}
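/* The loop above chains the CQE ring pages into a circle: the last
 * entry of each page becomes a next-page pointer, and the
 * (i % NUM_RCQ_RINGS) term makes the final page point back to the
 * first, so the completion queue wraps with no extra driver logic.
 */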
4255
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004256static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4257{
4258 union host_hc_status_block *sb;
4259 struct bnx2x_fastpath *fp = &bp->fp[index];
4260 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004261 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004262 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004263
Barak Witkowskia3348722012-04-23 03:04:46 +00004264 if (!bp->rx_ring_size &&
4265 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004266 rx_ring_size = MIN_RX_SIZE_NONTPA;
4267 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004268 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004269 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4270
Yuval Mintz065f8b92012-10-03 04:22:59 +00004271 if (CHIP_IS_E3(bp)) {
4272 u32 cfg = SHMEM_RD(bp,
4273 dev_info.port_hw_config[BP_PORT(bp)].
4274 default_cfg);
4275
4276 /* Decrease ring size for 1G functions */
4277 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4278 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4279 rx_ring_size /= 10;
4280 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004281
David S. Miller8decf862011-09-22 03:23:13 -04004282 /* allocate at least number of buffers required by FW */
4283 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4284 MIN_RX_SIZE_TPA, rx_ring_size);
4285
4286 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004287 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004288 rx_ring_size = bp->rx_ring_size;
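	/* Net effect of the sizing above: storage-only functions get the
	 * bare FW minimum, the auto-sized path splits MAX_RX_AVAIL among
	 * the RX queues (a tenth of that on 1G E3 SerDes ports) and
	 * floors the result at the FW-required minimum, and an explicit
	 * bp->rx_ring_size setting is used as-is.
	 */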
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004289
Yuval Mintz04c46732013-01-23 03:21:46 +00004290 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4291
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004292 /* Common */
4293 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004294
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004295 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004296 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004297 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004298 BNX2X_PCI_ALLOC(sb->e2_sb,
4299 &bnx2x_fp(bp, index, status_blk_mapping),
4300 sizeof(struct host_hc_status_block_e2));
4301 else
4302 BNX2X_PCI_ALLOC(sb->e1x_sb,
4303 &bnx2x_fp(bp, index, status_blk_mapping),
4304 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004305 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004306
4307 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4308 * set shortcuts for it.
4309 */
4310 if (!IS_FCOE_IDX(index))
4311 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004312
4313 /* Tx */
4314 if (!skip_tx_queue(bp, index)) {
4315 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004316 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004317 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004318
Merav Sicron51c1a582012-03-18 10:33:38 +00004319 DP(NETIF_MSG_IFUP,
4320 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004321 index, cos);
4322
4323 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004324 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004325 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4326 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004327 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004328 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004329 }
4330
4331 /* Rx */
4332 if (!skip_rx_queue(bp, index)) {
4333 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4334 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4335 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4336 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4337 &bnx2x_fp(bp, index, rx_desc_mapping),
4338 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4339
Dmitry Kravkov75b29452013-06-19 01:36:05 +03004340 /* Seed all CQEs by 1s */
4341 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4342 &bnx2x_fp(bp, index, rx_comp_mapping),
4343 sizeof(struct eth_fast_path_rx_cqe) *
4344 NUM_RCQ_BD);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004345
4346 /* SGE ring */
4347 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4348 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4349 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4350 &bnx2x_fp(bp, index, rx_sge_mapping),
4351 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4352 /* RX BD ring */
4353 bnx2x_set_next_page_rx_bd(fp);
4354
4355 /* CQ ring */
4356 bnx2x_set_next_page_rx_cq(fp);
4357
4358 /* BDs */
4359 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4360 if (ring_size < rx_ring_size)
4361 goto alloc_mem_err;
4362 }
4363
4364 return 0;
4365
4366/* handles low memory cases */
4367alloc_mem_err:
4368 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4369 index, ring_size);
4370 /* FW will drop all packets if the queue is not big enough.
4371 * In these cases we disable the queue
Ariel Elior6383c0b2011-07-14 08:31:57 +00004372 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004373 */
4374 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004375 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004376 /* release memory allocated for this queue */
4377 bnx2x_free_fp_mem_at(bp, index);
4378 return -ENOMEM;
4379 }
4380 return 0;
4381}
4382
Merav Sicron55c11942012-11-07 00:45:48 +00004383int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004384{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004385 if (!NO_FCOE(bp))
4386 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004387 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004388 /* we will fail the load process instead of marking
4389 * NO_FCOE_FLAG
4390 */
4391 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004392
4393 return 0;
4394}
4395
4396int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4397{
4398 int i;
4399
4400 /* 1. Allocate FP for leading - fatal if error
4401 * 2. Allocate RSS - fix number of queues if error
4402 */
4403
4404 /* leading */
4405 if (bnx2x_alloc_fp_mem_at(bp, 0))
4406 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004407
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004408 /* RSS */
4409 for_each_nondefault_eth_queue(bp, i)
4410 if (bnx2x_alloc_fp_mem_at(bp, i))
4411 break;
4412
4413 /* handle memory failures */
4414 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4415 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4416
4417 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004418 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004419 if (CNIC_SUPPORT(bp))
4420 /* move non-eth FPs next to the last eth FP;
4421 * must be done in that order:
4422 * FCOE_IDX < FWD_IDX < OOO_IDX
4423 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004424
Merav Sicron55c11942012-11-07 00:45:48 +00004425 /* move FCoE fp even if NO_FCOE_FLAG is on */
4426 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4427 bp->num_ethernet_queues -= delta;
4428 bp->num_queues = bp->num_ethernet_queues +
4429 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004430 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4431 bp->num_queues + delta, bp->num_queues);
4432 }
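	/* Illustrative shrink example: if 8 ethernet queues were
	 * requested but allocation failed at i == 5, then delta == 3 and
	 * the FCoE fastpath slides from index FCOE_IDX(bp) down to
	 * FCOE_IDX(bp) - 3, keeping the queue array contiguous.
	 */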
4433
4434 return 0;
4435}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004436
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004437void bnx2x_free_mem_bp(struct bnx2x *bp)
4438{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004439 int i;
4440
4441 for (i = 0; i < bp->fp_array_size; i++)
4442 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004443 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004444 kfree(bp->sp_objs);
4445 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004446 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004447 kfree(bp->msix_table);
4448 kfree(bp->ilt);
4449}
4450
Bill Pemberton0329aba2012-12-03 09:24:24 -05004451int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004452{
4453 struct bnx2x_fastpath *fp;
4454 struct msix_entry *tbl;
4455 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004456 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004457 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004458 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004459
Ariel Elior6383c0b2011-07-14 08:31:57 +00004460 /*
4461 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004462 * path IGU SBs plus default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004463 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004464 msix_table_size = bp->igu_sb_cnt;
4465 if (IS_PF(bp))
4466 msix_table_size++;
4467 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004468
4469 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004470 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004471 bp->fp_array_size = fp_array_size;
4472 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004473
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004474 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004475 if (!fp)
4476 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004477 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004478 fp[i].tpa_info =
4479 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4480 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4481 if (!(fp[i].tpa_info))
4482 goto alloc_err;
4483 }
4484
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004485 bp->fp = fp;
4486
Barak Witkowski15192a82012-06-19 07:48:28 +00004487 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004488 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004489 GFP_KERNEL);
4490 if (!bp->sp_objs)
4491 goto alloc_err;
4492
4493 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004494 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004495 GFP_KERNEL);
4496 if (!bp->fp_stats)
4497 goto alloc_err;
4498
Merav Sicron65565882012-06-19 07:48:26 +00004499 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004500 txq_array_size =
4501 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4502 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4503
4504 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4505 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004506 if (!bp->bnx2x_txq)
4507 goto alloc_err;
4508
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004509 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004510 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004511 if (!tbl)
4512 goto alloc_err;
4513 bp->msix_table = tbl;
4514
4515 /* ilt */
4516 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4517 if (!ilt)
4518 goto alloc_err;
4519 bp->ilt = ilt;
4520
4521 return 0;
4522alloc_err:
4523 bnx2x_free_mem_bp(bp);
4524 return -ENOMEM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004525}
4526
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004527int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004528{
4529 struct bnx2x *bp = netdev_priv(dev);
4530
4531 if (unlikely(!netif_running(dev)))
4532 return 0;
4533
Yuval Mintz5d07d862012-09-13 02:56:21 +00004534 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004535 return bnx2x_nic_load(bp, LOAD_NORMAL);
4536}
4537
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004538int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4539{
4540 u32 sel_phy_idx = 0;
4541 if (bp->link_params.num_phys <= 1)
4542 return INT_PHY;
4543
4544 if (bp->link_vars.link_up) {
4545 sel_phy_idx = EXT_PHY1;
4546 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4547 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4548 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4549 sel_phy_idx = EXT_PHY2;
4550 } else {
4551
4552 switch (bnx2x_phy_selection(&bp->link_params)) {
4553 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4554 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4555 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4556 sel_phy_idx = EXT_PHY1;
4557 break;
4558 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4559 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4560 sel_phy_idx = EXT_PHY2;
4561 break;
4562 }
4563 }
4564
4565 return sel_phy_idx;
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004566}
4567int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4568{
4569 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4570 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004571 * The selected activated PHY is always after swapping (in case PHY
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004572 * swapping is enabled), so when swapping is enabled we need to reverse
4573 * the configuration.
4574 */
4575
4576 if (bp->link_params.multi_phy_config &
4577 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4578 if (sel_phy_idx == EXT_PHY1)
4579 sel_phy_idx = EXT_PHY2;
4580 else if (sel_phy_idx == EXT_PHY2)
4581 sel_phy_idx = EXT_PHY1;
4582 }
4583 return LINK_CONFIG_IDX(sel_phy_idx);
4584}
4585
Merav Sicron55c11942012-11-07 00:45:48 +00004586#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004587int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4588{
4589 struct bnx2x *bp = netdev_priv(dev);
4590 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4591
4592 switch (type) {
4593 case NETDEV_FCOE_WWNN:
4594 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4595 cp->fcoe_wwn_node_name_lo);
4596 break;
4597 case NETDEV_FCOE_WWPN:
4598 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4599 cp->fcoe_wwn_port_name_lo);
4600 break;
4601 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004602 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004603 return -EINVAL;
4604 }
4605
4606 return 0;
4607}
4608#endif
4609
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004610/* called with rtnl_lock */
4611int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4612{
4613 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004614
4615 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004616 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004617 return -EAGAIN;
4618 }
4619
4620 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004621 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4622 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004623 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004624 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004625
4626 /* This does not race with packet allocation
4627 * because the actual alloc size is
4628 * only updated as part of load
4629 */
4630 dev->mtu = new_mtu;
4631
Michał Mirosław66371c42011-04-12 09:38:23 +00004632 return bnx2x_reload_if_running(dev);
4633}
4634
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004635netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004636 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004637{
4638 struct bnx2x *bp = netdev_priv(dev);
4639
4640 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004641 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004642 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004643 features &= ~NETIF_F_GRO;
4644 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004645
4646 return features;
4647}
4648
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004649int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004650{
4651 struct bnx2x *bp = netdev_priv(dev);
4652 u32 flags = bp->flags;
Eric Dumazet8802f572013-05-18 07:14:53 +00004653 u32 changes;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004654 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00004655
4656 if (features & NETIF_F_LRO)
4657 flags |= TPA_ENABLE_FLAG;
4658 else
4659 flags &= ~TPA_ENABLE_FLAG;
4660
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004661 if (features & NETIF_F_GRO)
4662 flags |= GRO_ENABLE_FLAG;
4663 else
4664 flags &= ~GRO_ENABLE_FLAG;
4665
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004666 if (features & NETIF_F_LOOPBACK) {
4667 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4668 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4669 bnx2x_reload = true;
4670 }
4671 } else {
4672 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4673 bp->link_params.loopback_mode = LOOPBACK_NONE;
4674 bnx2x_reload = true;
4675 }
4676 }
4677
Eric Dumazet8802f572013-05-18 07:14:53 +00004678 changes = flags ^ bp->flags;
4679
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004680 /* if GRO is changed while LRO is enabled, don't force a reload */
Eric Dumazet8802f572013-05-18 07:14:53 +00004681 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4682 changes &= ~GRO_ENABLE_FLAG;
4683
4684 if (changes)
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004685 bnx2x_reload = true;
Eric Dumazet8802f572013-05-18 07:14:53 +00004686
4687 bp->flags = flags;
Michał Mirosław66371c42011-04-12 09:38:23 +00004688
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004689 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004690 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4691 return bnx2x_reload_if_running(dev);
4692 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004693 }
4694
Michał Mirosław66371c42011-04-12 09:38:23 +00004695 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004696}
4697
4698void bnx2x_tx_timeout(struct net_device *dev)
4699{
4700 struct bnx2x *bp = netdev_priv(dev);
4701
4702#ifdef BNX2X_STOP_ON_ERROR
4703 if (!bp->panic)
4704 bnx2x_panic();
4705#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004706
4707 smp_mb__before_clear_bit();
4708 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4709 smp_mb__after_clear_bit();
4710
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004711 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00004712 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004713}
4714
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004715int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4716{
4717 struct net_device *dev = pci_get_drvdata(pdev);
4718 struct bnx2x *bp;
4719
4720 if (!dev) {
4721 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4722 return -ENODEV;
4723 }
4724 bp = netdev_priv(dev);
4725
4726 rtnl_lock();
4727
4728 pci_save_state(pdev);
4729
4730 if (!netif_running(dev)) {
4731 rtnl_unlock();
4732 return 0;
4733 }
4734
4735 netif_device_detach(dev);
4736
Yuval Mintz5d07d862012-09-13 02:56:21 +00004737 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004738
4739 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4740
4741 rtnl_unlock();
4742
4743 return 0;
4744}
4745
4746int bnx2x_resume(struct pci_dev *pdev)
4747{
4748 struct net_device *dev = pci_get_drvdata(pdev);
4749 struct bnx2x *bp;
4750 int rc;
4751
4752 if (!dev) {
4753 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4754 return -ENODEV;
4755 }
4756 bp = netdev_priv(dev);
4757
4758 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004759 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004760 return -EAGAIN;
4761 }
4762
4763 rtnl_lock();
4764
4765 pci_restore_state(pdev);
4766
4767 if (!netif_running(dev)) {
4768 rtnl_unlock();
4769 return 0;
4770 }
4771
4772 bnx2x_set_power_state(bp, PCI_D0);
4773 netif_device_attach(dev);
4774
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004775 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4776
4777 rtnl_unlock();
4778
4779 return rc;
4780}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004781
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004782void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4783 u32 cid)
4784{
4785 /* ustorm cxt validation */
4786 cxt->ustorm_ag_context.cdu_usage =
4787 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4788 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4789 /* xcontext validation */
4790 cxt->xstorm_ag_context.cdu_reserved =
4791 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4792 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4793}
4794
Eric Dumazet1191cb82012-04-27 21:39:21 +00004795static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4796 u8 fw_sb_id, u8 sb_index,
4797 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004798{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004799 u32 addr = BAR_CSTRORM_INTMEM +
4800 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4801 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004802 DP(NETIF_MSG_IFUP,
4803 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4804 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004805}
4806
Eric Dumazet1191cb82012-04-27 21:39:21 +00004807static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4808 u16 fw_sb_id, u8 sb_index,
4809 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004810{
4811 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4812 u32 addr = BAR_CSTRORM_INTMEM +
4813 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004814 u8 flags = REG_RD8(bp, addr);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004815 /* clear and set */
4816 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4817 flags |= enable_flag;
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004818 REG_WR8(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004819 DP(NETIF_MSG_IFUP,
4820 "port %x fw_sb_id %d sb_index %d disable %d\n",
4821 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004822}
4823
4824void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4825 u8 sb_index, u8 disable, u16 usec)
4826{
4827 int port = BP_PORT(bp);
4828 u8 ticks = usec / BNX2X_BTR;
4829
4830 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4831
4832 disable = disable ? 1 : (usec ? 0 : 1);
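	/* Note: a zero usec value implicitly disables coalescing for
	 * this status block index even if the caller passed disable == 0.
	 */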
4833 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4834}