/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
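
/* Worked example (illustrative numbers, not taken from a real
 * configuration): with 4 ETH queues, max_cos = 3 and a fastpath moving
 * from index 4 to index 2 (delta = 2), old_max_eth_txqs = 4 * 3 = 12
 * and new_max_eth_txqs = (4 - 4 + 2) * 3 = 6, so the txdata slot shifts
 * back by max_cos * delta = 6 entries, matching the comment above.
 */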

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
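
/* Illustrative note (element sizes assumed from the BIT_VEC64 helpers
 * defined elsewhere in the driver): sge_mask is a vector of 64-bit
 * elements, so an SGE index maps to element idx >> BIT_VEC64_ELEM_SHIFT.
 * The loop above advances the producer only over whole elements whose
 * bits were all cleared, i.e. in steps of BIT_VEC64_ELEM_SZ SGEs, which
 * is why delta is accumulated one element at a time.
 */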

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
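
/* Byte breakdown of the 12 bytes (a standard RFC 1323 TCP timestamp
 * option padded with two NOPs): 1 (NOP) + 1 (NOP) + 1 (kind = 8) +
 * 1 (length = 10) + 4 (TSval) + 4 (TSecr) = 12.
 */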
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs:	number of segments coalesced by the FW
 *
 * Approximate the MSS for this aggregation using its first packet.
 * Compute the number of aggregated segments, and the gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
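
/* Worked example (assumed standard sizes: ETH_HLEN = 14,
 * sizeof(struct iphdr) = 20, sizeof(struct tcphdr) = 20): for an IPv4
 * aggregation without timestamps, hdrs_len = 14 + 20 + 20 = 54, so a
 * first packet with len_on_bd = 1514 yields gso_size (the estimated
 * MSS) of 1514 - 54 = 1460.
 */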

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that the "next" element will consume 2 indices)
		 */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
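
/* Note (rationale inferred from the two helpers above): a non-zero
 * fp->rx_frag_size indicates the rx buffer fits in a page fragment, so
 * allocation goes through the cheaper netdev_alloc_frag() path and
 * freeing through put_page() on the owning page; otherwise plain
 * kmalloc()/kfree() is used for the oversized buffer.
 */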

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_set_network_header(skb, 0);
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_ip_csum(bp, skb);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_ipv6_csum(bp, skb);
			break;
		default:
			BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
		tcp_gro_complete(skb);
	}
#endif
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	 * that's why it's ok here
	 */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
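
/* Worked example (illustrative values): with maxCfg = 25 and an actual
 * line speed of 1000 Mbps, SI mode reports 1000 * 25 / 100 = 250 Mbps
 * (a fraction of the current speed), while SD mode reports
 * min(1000, 25 * 100) = 1000 Mbps, i.e. an absolute 2500 Mbps per-VN
 * cap that does not bite here.
 */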
1098
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001099/**
1100 * bnx2x_fill_report_data - fill link report data to report
1101 *
1102 * @bp: driver handle
1103 * @data: link state to update
1104 *
1105 * It uses a none-atomic bit operations because is called under the mutex.
1106 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001107static void bnx2x_fill_report_data(struct bnx2x *bp,
1108 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001109{
1110 u16 line_speed = bnx2x_get_mf_speed(bp);
1111
1112 memset(data, 0, sizeof(*data));
1113
1114 /* Fill the report data: efective line speed */
1115 data->line_speed = line_speed;
1116
1117 /* Link is down */
1118 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1119 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1120 &data->link_report_flags);
1121
1122 /* Full DUPLEX */
1123 if (bp->link_vars.duplex == DUPLEX_FULL)
1124 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1125
1126 /* Rx Flow Control is ON */
1127 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1128 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1129
1130 /* Tx Flow Control is ON */
1131 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1132 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1133}
1134
1135/**
1136 * bnx2x_link_report - report link status to OS.
1137 *
1138 * @bp: driver handle
1139 *
1140 * Calls the __bnx2x_link_report() under the same locking scheme
1141 * as a link/PHY state managing code to ensure a consistent link
1142 * reporting.
1143 */
1144
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001145void bnx2x_link_report(struct bnx2x *bp)
1146{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001147 bnx2x_acquire_phy_lock(bp);
1148 __bnx2x_link_report(bp);
1149 bnx2x_release_phy_lock(bp);
1150}
1151
1152/**
1153 * __bnx2x_link_report - report link status to OS.
1154 *
1155 * @bp: driver handle
1156 *
1157 * None atomic inmlementation.
1158 * Should be called under the phy_lock.
1159 */
1160void __bnx2x_link_report(struct bnx2x *bp)
1161{
1162 struct bnx2x_link_report_data cur_data;
1163
1164 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001165 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001166 bnx2x_read_mf_cfg(bp);
1167
1168 /* Read the current link report info */
1169 bnx2x_fill_report_data(bp, &cur_data);
1170
1171 /* Don't report link down or exactly the same link status twice */
1172 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1173 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1174 &bp->last_reported_link.link_report_flags) &&
1175 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1176 &cur_data.link_report_flags)))
1177 return;
1178
1179 bp->link_cnt++;
1180
1181 /* We are going to report a new link parameters now -
1182 * remember the current data for the next time.
1183 */
1184 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1185
1186 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1187 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001188 netif_carrier_off(bp->dev);
1189 netdev_err(bp->dev, "NIC Link is Down\n");
1190 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001191 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001192 const char *duplex;
1193 const char *flow;
1194
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001195 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001196
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001197 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1198 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001199 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001200 else
Joe Perches94f05b02011-08-14 12:16:20 +00001201 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001202
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001203 /* Handle the FC at the end so that only these flags would be
1204 * possibly set. This way we may easily check if there is no FC
1205 * enabled.
1206 */
1207 if (cur_data.link_report_flags) {
1208 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1209 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001210 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1211 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001212 flow = "ON - receive & transmit";
1213 else
1214 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001215 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001216 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001217 }
Joe Perches94f05b02011-08-14 12:16:20 +00001218 } else {
1219 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001220 }
Joe Perches94f05b02011-08-14 12:16:20 +00001221 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1222 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001223 }
1224}
1225
Eric Dumazet1191cb82012-04-27 21:39:21 +00001226static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1227{
1228 int i;
1229
1230 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1231 struct eth_rx_sge *sge;
1232
1233 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1234 sge->addr_hi =
1235 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1236 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1237
1238 sge->addr_lo =
1239 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1240 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1241 }
1242}
1243
1244static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1245 struct bnx2x_fastpath *fp, int last)
1246{
1247 int i;
1248
1249 for (i = 0; i < last; i++) {
1250 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1251 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1252 u8 *data = first_buf->data;
1253
1254 if (data == NULL) {
1255 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1256 continue;
1257 }
1258 if (tpa_info->tpa_state == BNX2X_TPA_START)
1259 dma_unmap_single(&bp->pdev->dev,
1260 dma_unmap_addr(first_buf, mapping),
1261 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001262 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001263 first_buf->data = NULL;
1264 }
1265}
1266
Merav Sicron55c11942012-11-07 00:45:48 +00001267void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1268{
1269 int j;
1270
1271 for_each_rx_queue_cnic(bp, j) {
1272 struct bnx2x_fastpath *fp = &bp->fp[j];
1273
1274 fp->rx_bd_cons = 0;
1275
1276 /* Activate BD ring */
1277 /* Warning!
1278 * this will generate an interrupt (to the TSTORM)
1279 * must only be done after chip is initialized
1280 */
1281 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1282 fp->rx_sge_prod);
1283 }
1284}
1285
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001286void bnx2x_init_rx_rings(struct bnx2x *bp)
1287{
1288 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001289 u16 ring_prod;
1290 int i, j;
1291
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001292 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001293 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001294 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001295
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001296 DP(NETIF_MSG_IFUP,
1297 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1298
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001299 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001300 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001301 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001302 struct bnx2x_agg_info *tpa_info =
1303 &fp->tpa_info[i];
1304 struct sw_rx_bd *first_buf =
1305 &tpa_info->first_buf;
1306
Eric Dumazetd46d1322012-12-10 12:16:06 +00001307 first_buf->data = bnx2x_frag_alloc(fp);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001308 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001309 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1310 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001311 bnx2x_free_tpa_pool(bp, fp, i);
1312 fp->disable_tpa = 1;
1313 break;
1314 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001315 dma_unmap_addr_set(first_buf, mapping, 0);
1316 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001317 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001318
1319 /* "next page" elements initialization */
1320 bnx2x_set_next_page_sgl(fp);
1321
1322 /* set SGEs bit mask */
1323 bnx2x_init_sge_ring_bit_mask(fp);
1324
1325 /* Allocate SGEs and initialize the ring elements */
1326 for (i = 0, ring_prod = 0;
1327 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1328
1329 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001330 BNX2X_ERR("was only able to allocate %d rx sges\n",
1331 i);
1332 BNX2X_ERR("disabling TPA for queue[%d]\n",
1333 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001334 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001335 bnx2x_free_rx_sge_range(bp, fp,
1336 ring_prod);
1337 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001338 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001339 fp->disable_tpa = 1;
1340 ring_prod = 0;
1341 break;
1342 }
1343 ring_prod = NEXT_SGE_IDX(ring_prod);
1344 }
1345
1346 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001347 }
1348 }
1349
Merav Sicron55c11942012-11-07 00:45:48 +00001350 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001351 struct bnx2x_fastpath *fp = &bp->fp[j];
1352
1353 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001354
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001355 /* Activate BD ring */
1356 /* Warning!
 1357 * this will generate an interrupt (to the TSTORM);
 1358 * it must only be done after the chip is initialized
1359 */
1360 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1361 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001362
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001363 if (j != 0)
1364 continue;
1365
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001366 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001367 REG_WR(bp, BAR_USTRORM_INTMEM +
1368 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1369 U64_LO(fp->rx_comp_mapping));
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1372 U64_HI(fp->rx_comp_mapping));
1373 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001374 }
1375}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001376
Merav Sicron55c11942012-11-07 00:45:48 +00001377static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1378{
1379 u8 cos;
1380 struct bnx2x *bp = fp->bp;
1381
1382 for_each_cos_in_tx_queue(fp, cos) {
1383 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1384 unsigned pkts_compl = 0, bytes_compl = 0;
1385
1386 u16 sw_prod = txdata->tx_pkt_prod;
1387 u16 sw_cons = txdata->tx_pkt_cons;
1388
1389 while (sw_cons != sw_prod) {
1390 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1391 &pkts_compl, &bytes_compl);
1392 sw_cons++;
1393 }
1394
1395 netdev_tx_reset_queue(
1396 netdev_get_tx_queue(bp->dev,
1397 txdata->txq_index));
1398 }
1399}
1400
1401static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1402{
1403 int i;
1404
1405 for_each_tx_queue_cnic(bp, i) {
1406 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1407 }
1408}
1409
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001410static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1411{
1412 int i;
1413
Merav Sicron55c11942012-11-07 00:45:48 +00001414 for_each_eth_queue(bp, i) {
1415 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001416 }
1417}
1418
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001419static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1420{
1421 struct bnx2x *bp = fp->bp;
1422 int i;
1423
1424 /* ring wasn't allocated */
1425 if (fp->rx_buf_ring == NULL)
1426 return;
1427
1428 for (i = 0; i < NUM_RX_BD; i++) {
1429 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001430 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001431
Eric Dumazete52fcb22011-11-14 06:05:34 +00001432 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001433 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001434 dma_unmap_single(&bp->pdev->dev,
1435 dma_unmap_addr(rx_buf, mapping),
1436 fp->rx_buf_size, DMA_FROM_DEVICE);
1437
Eric Dumazete52fcb22011-11-14 06:05:34 +00001438 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001439 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001440 }
1441}
1442
Merav Sicron55c11942012-11-07 00:45:48 +00001443static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1444{
1445 int j;
1446
1447 for_each_rx_queue_cnic(bp, j) {
1448 bnx2x_free_rx_bds(&bp->fp[j]);
1449 }
1450}
1451
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001452static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1453{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001454 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001455
Merav Sicron55c11942012-11-07 00:45:48 +00001456 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001457 struct bnx2x_fastpath *fp = &bp->fp[j];
1458
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001459 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001460
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001461 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001462 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001463 }
1464}
1465
Merav Sicron55c11942012-11-07 00:45:48 +00001466void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1467{
1468 bnx2x_free_tx_skbs_cnic(bp);
1469 bnx2x_free_rx_skbs_cnic(bp);
1470}
1471
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001472void bnx2x_free_skbs(struct bnx2x *bp)
1473{
1474 bnx2x_free_tx_skbs(bp);
1475 bnx2x_free_rx_skbs(bp);
1476}
1477
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001478void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1479{
1480 /* load old values */
1481 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1482
1483 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1484 /* leave all but MAX value */
1485 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1486
1487 /* set new MAX value */
1488 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1489 & FUNC_MF_CFG_MAX_BW_MASK;
1490
1491 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1492 }
1493}
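/* Read-modify-write sketch (shift value hypothetical): if
 * FUNC_MF_CFG_MAX_BW_SHIFT were 16 and value were 100, the function would
 * clear bits 16..31 of mf_cfg and OR in (100 << 16) before handing the
 * word to the MCP via the SET_MF_BW command above.
 */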
1494
Dmitry Kravkovca924292011-06-14 01:33:08 +00001495/**
1496 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1497 *
1498 * @bp: driver handle
1499 * @nvecs: number of vectors to be released
1500 */
1501static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001502{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001503 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001504
Dmitry Kravkovca924292011-06-14 01:33:08 +00001505 if (nvecs == offset)
1506 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001507
1508 /* VFs don't have a default SB */
1509 if (IS_PF(bp)) {
1510 free_irq(bp->msix_table[offset].vector, bp->dev);
1511 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1512 bp->msix_table[offset].vector);
1513 offset++;
1514 }
Merav Sicron55c11942012-11-07 00:45:48 +00001515
1516 if (CNIC_SUPPORT(bp)) {
1517 if (nvecs == offset)
1518 return;
1519 offset++;
1520 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001521
Dmitry Kravkovca924292011-06-14 01:33:08 +00001522 for_each_eth_queue(bp, i) {
1523 if (nvecs == offset)
1524 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001525 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1526 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001527
1528 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001529 }
1530}
1531
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001532void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001533{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001534 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001535 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1536 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1537
1538 /* vfs don't have a default status block */
1539 if (IS_PF(bp))
1540 nvecs++;
1541
1542 bnx2x_free_msix_irqs(bp, nvecs);
1543 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001544 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001545 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001546}
1547
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001548int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001549{
Ariel Elior1ab44342013-01-01 05:22:23 +00001550 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001551
Ariel Elior1ab44342013-01-01 05:22:23 +00001552 /* VFs don't have a default status block */
1553 if (IS_PF(bp)) {
1554 bp->msix_table[msix_vec].entry = msix_vec;
1555 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1556 bp->msix_table[0].entry);
1557 msix_vec++;
1558 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001559
Merav Sicron55c11942012-11-07 00:45:48 +00001560 /* CNIC requires an MSI-X vector for itself */
1561 if (CNIC_SUPPORT(bp)) {
1562 bp->msix_table[msix_vec].entry = msix_vec;
1563 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1564 msix_vec, bp->msix_table[msix_vec].entry);
1565 msix_vec++;
1566 }
1567
Ariel Elior6383c0b2011-07-14 08:31:57 +00001568 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001569 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001570 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001571 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1572 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001573 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001574 }
1575
Ariel Elior1ab44342013-01-01 05:22:23 +00001576 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1577 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001578
Ariel Elior1ab44342013-01-01 05:22:23 +00001579 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001580
1581 /*
1582 * reconfigure number of tx/rx queues according to available
1583 * MSI-X vectors
1584 */
Merav Sicron55c11942012-11-07 00:45:48 +00001585 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001586 /* how many fewer vectors will we have? */
Ariel Elior1ab44342013-01-01 05:22:23 +00001587 int diff = msix_vec - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001588
Merav Sicron51c1a582012-03-18 10:33:38 +00001589 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001590
1591 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1592
1593 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001594 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1595 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001596 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001597 /*
 1598 * decrease the number of queues by the number of unallocated entries
1599 */
Merav Sicron55c11942012-11-07 00:45:48 +00001600 bp->num_ethernet_queues -= diff;
1601 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001602
Merav Sicron51c1a582012-03-18 10:33:38 +00001603 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001604 bp->num_queues);
1605 } else if (rc > 0) {
1606 /* Get by with single vector */
1607 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1608 if (rc) {
1609 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1610 rc);
1611 goto no_msix;
1612 }
1613
1614 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1615 bp->flags |= USING_SINGLE_MSIX_FLAG;
1616
Merav Sicron55c11942012-11-07 00:45:48 +00001617 BNX2X_DEV_INFO("set number of queues to 1\n");
1618 bp->num_ethernet_queues = 1;
1619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001620 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001621 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001622 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001623 }
1624
1625 bp->flags |= USING_MSIX_FLAG;
1626
1627 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001628
1629no_msix:
 1630 /* fall back to INTx if there is not enough memory */
1631 if (rc == -ENOMEM)
1632 bp->flags |= DISABLE_MSI_FLAG;
1633
1634 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001635}
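/* Fallback ladder (numbers illustrative): a request for 1 + CNIC + 8 == 10
 * vectors on a system that can grant only 6 makes pci_enable_msix() return
 * 6; the code above retries with 6 and sheds 4 ETH queues. A positive
 * return below BNX2X_MIN_MSIX_VEC_CNT() degrades to a single shared MSI-X
 * vector, and a negative one takes the no_msix path (with MSI disabled
 * outright on -ENOMEM).
 */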
1636
1637static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1638{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001639 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001640
Ariel Eliorad5afc82013-01-01 05:22:26 +00001641 /* no default status block for vf */
1642 if (IS_PF(bp)) {
1643 rc = request_irq(bp->msix_table[offset++].vector,
1644 bnx2x_msix_sp_int, 0,
1645 bp->dev->name, bp->dev);
1646 if (rc) {
1647 BNX2X_ERR("request sp irq failed\n");
1648 return -EBUSY;
1649 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001650 }
1651
Merav Sicron55c11942012-11-07 00:45:48 +00001652 if (CNIC_SUPPORT(bp))
1653 offset++;
1654
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001655 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001656 struct bnx2x_fastpath *fp = &bp->fp[i];
1657 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1658 bp->dev->name, i);
1659
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001660 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001661 bnx2x_msix_fp_int, 0, fp->name, fp);
1662 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001663 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1664 bp->msix_table[offset].vector, rc);
1665 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001666 return -EBUSY;
1667 }
1668
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001669 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001670 }
1671
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001672 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001673 if (IS_PF(bp)) {
1674 offset = 1 + CNIC_SUPPORT(bp);
1675 netdev_info(bp->dev,
1676 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1677 bp->msix_table[0].vector,
1678 0, bp->msix_table[offset].vector,
1679 i - 1, bp->msix_table[offset + i - 1].vector);
1680 } else {
1681 offset = CNIC_SUPPORT(bp);
1682 netdev_info(bp->dev,
1683 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1684 0, bp->msix_table[offset].vector,
1685 i - 1, bp->msix_table[offset + i - 1].vector);
1686 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001687 return 0;
1688}
1689
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001690int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001691{
1692 int rc;
1693
1694 rc = pci_enable_msi(bp->pdev);
1695 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001696 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001697 return -1;
1698 }
1699 bp->flags |= USING_MSI_FLAG;
1700
1701 return 0;
1702}
1703
1704static int bnx2x_req_irq(struct bnx2x *bp)
1705{
1706 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001707 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001708
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001709 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001710 flags = 0;
1711 else
1712 flags = IRQF_SHARED;
1713
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001714 if (bp->flags & USING_MSIX_FLAG)
1715 irq = bp->msix_table[0].vector;
1716 else
1717 irq = bp->pdev->irq;
1718
1719 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001720}
1721
Eric Dumazet1191cb82012-04-27 21:39:21 +00001722static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001723{
1724 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001725 if (bp->flags & USING_MSIX_FLAG &&
1726 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001727 rc = bnx2x_req_msix_irqs(bp);
1728 if (rc)
1729 return rc;
1730 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001731 rc = bnx2x_req_irq(bp);
1732 if (rc) {
1733 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1734 return rc;
1735 }
1736 if (bp->flags & USING_MSI_FLAG) {
1737 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001738 netdev_info(bp->dev, "using MSI IRQ %d\n",
1739 bp->dev->irq);
1740 }
1741 if (bp->flags & USING_MSIX_FLAG) {
1742 bp->dev->irq = bp->msix_table[0].vector;
1743 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1744 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001745 }
1746 }
1747
1748 return 0;
1749}
1750
Merav Sicron55c11942012-11-07 00:45:48 +00001751static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1752{
1753 int i;
1754
1755 for_each_rx_queue_cnic(bp, i)
1756 napi_enable(&bnx2x_fp(bp, i, napi));
1757}
1758
Eric Dumazet1191cb82012-04-27 21:39:21 +00001759static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001760{
1761 int i;
1762
Merav Sicron55c11942012-11-07 00:45:48 +00001763 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001764 napi_enable(&bnx2x_fp(bp, i, napi));
1765}
1766
Merav Sicron55c11942012-11-07 00:45:48 +00001767static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1768{
1769 int i;
1770
1771 for_each_rx_queue_cnic(bp, i)
1772 napi_disable(&bnx2x_fp(bp, i, napi));
1773}
1774
Eric Dumazet1191cb82012-04-27 21:39:21 +00001775static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001776{
1777 int i;
1778
Merav Sicron55c11942012-11-07 00:45:48 +00001779 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001780 napi_disable(&bnx2x_fp(bp, i, napi));
1781}
1782
1783void bnx2x_netif_start(struct bnx2x *bp)
1784{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001785 if (netif_running(bp->dev)) {
1786 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001787 if (CNIC_LOADED(bp))
1788 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001789 bnx2x_int_enable(bp);
1790 if (bp->state == BNX2X_STATE_OPEN)
1791 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001792 }
1793}
1794
1795void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1796{
1797 bnx2x_int_disable_sync(bp, disable_hw);
1798 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001799 if (CNIC_LOADED(bp))
1800 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001801}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001802
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001803u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1804{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001805 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001806
Merav Sicron55c11942012-11-07 00:45:48 +00001807 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001808 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1809 u16 ether_type = ntohs(hdr->h_proto);
1810
1811 /* Skip VLAN tag if present */
1812 if (ether_type == ETH_P_8021Q) {
1813 struct vlan_ethhdr *vhdr =
1814 (struct vlan_ethhdr *)skb->data;
1815
1816 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1817 }
1818
1819 /* If ethertype is FCoE or FIP - use FCoE ring */
1820 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001821 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001822 }
Merav Sicron55c11942012-11-07 00:45:48 +00001823
David S. Miller823dcd22011-08-20 10:39:12 -07001824 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001825 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001826}
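/* Steering example (illustrative): a FIP frame carrying an 802.1Q tag
 * arrives with h_proto == ETH_P_8021Q; the code above peeks past the VLAN
 * header, finds h_vlan_encapsulated_proto == ETH_P_FIP and returns the
 * dedicated FCoE ring, while all other traffic falls through to
 * __skb_tx_hash() over the ETH queues only.
 */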
1827
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001828void bnx2x_set_num_queues(struct bnx2x *bp)
1829{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001830 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001831 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001832
Barak Witkowskia3348722012-04-23 03:04:46 +00001833 /* override in STORAGE SD modes */
1834 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001835 bp->num_ethernet_queues = 1;
1836
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001837 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001838 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1839 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001840
1841 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001842}
1843
David S. Miller823dcd22011-08-20 10:39:12 -07001844/**
1845 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1846 *
 1847 * @bp: driver handle
1848 *
 1849 * We currently support at most 16 Tx queues for each CoS, thus we will
 1850 * allocate a multiple of 16 for ETH L2 rings, according to the value of
 1851 * bp->max_cos.
1852 *
1853 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1854 * index after all ETH L2 indices.
1855 *
1856 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1857 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1858 * 16..31,...) with indices that are not coupled with any real Tx queue.
1859 *
1860 * The proper configuration of skb->queue_mapping is handled by
1861 * bnx2x_select_queue() and __skb_tx_hash().
1862 *
1863 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1864 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1865 */
Merav Sicron55c11942012-11-07 00:45:48 +00001866static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001867{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001868 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001869
Merav Sicron65565882012-06-19 07:48:26 +00001870 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001871 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001872
 1873 /* account for the FCoE queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001874 if (include_cnic && !NO_FCOE(bp)) {
1875 rx++;
1876 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001877 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001878
1879 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1880 if (rc) {
1881 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1882 return rc;
1883 }
1884 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1885 if (rc) {
1886 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1887 return rc;
1888 }
1889
Merav Sicron51c1a582012-03-18 10:33:38 +00001890 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001891 tx, rx);
1892
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001893 return rc;
1894}
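/* Worked example (queue counts illustrative): with 8 ETH queues and
 * bp->max_cos == 3, tx == 24 and rx == 8; including the CNIC/FCoE queue
 * bumps both by one, so the stack would be told 25 real Tx queues and
 * 9 real Rx queues.
 */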
1895
Eric Dumazet1191cb82012-04-27 21:39:21 +00001896static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001897{
1898 int i;
1899
1900 for_each_queue(bp, i) {
1901 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001902 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001903
1904 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1905 if (IS_FCOE_IDX(i))
1906 /*
1907 * Although there are no IP frames expected to arrive to
1908 * this ring we still want to add an
1909 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1910 * overrun attack.
1911 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001912 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001913 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001914 mtu = bp->dev->mtu;
1915 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1916 IP_HEADER_ALIGNMENT_PADDING +
1917 ETH_OVREHEAD +
1918 mtu +
1919 BNX2X_FW_RX_ALIGN_END;
 1920 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00001921 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1922 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1923 else
1924 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001925 }
1926}
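/* Sizing sketch (exact constants live outside this excerpt): for a
 * 1500-byte MTU, rx_buf_size is the 1500 bytes plus alignment start/end,
 * ETH_OVREHEAD and the IP-header padding. Whenever
 * rx_buf_size + NET_SKB_PAD still fits in one page, rx_frag_size is set so
 * buffers can presumably come from the page-fragment allocator;
 * rx_frag_size == 0 signals bnx2x_frag_alloc()/bnx2x_frag_free() to use a
 * plain allocation instead.
 */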
1927
Eric Dumazet1191cb82012-04-27 21:39:21 +00001928static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001929{
1930 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001931 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1932
Dmitry Kravkov96305232012-04-03 18:41:30 +00001933 /* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001934 * enabled
1935 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001936 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1937 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001938 bp->fp->cl_id +
1939 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001940
1941 /*
1942 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1943 * per-port, so if explicit configuration is needed, do it only
1944 * for a PMF.
1945 *
1946 * For 57712 and newer on the other hand it's a per-function
1947 * configuration.
1948 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001949 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001950}
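/* Fill sketch (assuming ethtool's default spread of index % n_rx_rings):
 * with 4 ETH queues the table would read cl_id + 0, cl_id + 1, cl_id + 2,
 * cl_id + 3, cl_id + 0, ... across all sizeof(bp->rss_conf_obj.ind_table)
 * slots.
 */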
1951
Dmitry Kravkov96305232012-04-03 18:41:30 +00001952int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001953 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001954{
Yuval Mintz3b603062012-03-18 10:33:39 +00001955 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001956
 1957 /* Although RSS is meaningless when there is a single HW queue, we
1958 * still need it enabled in order to have HW Rx hash generated.
1959 *
1960 * if (!is_eth_multi(bp))
1961 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1962 */
1963
Dmitry Kravkov96305232012-04-03 18:41:30 +00001964 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001965
1966 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1967
Dmitry Kravkov96305232012-04-03 18:41:30 +00001968 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001969
Dmitry Kravkov96305232012-04-03 18:41:30 +00001970 /* RSS configuration */
1971 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1972 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1973 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1974 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001975 if (rss_obj->udp_rss_v4)
1976 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1977 if (rss_obj->udp_rss_v6)
1978 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001979
Dmitry Kravkov96305232012-04-03 18:41:30 +00001980 /* Hash bits */
1981 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001982
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001983 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001984
Dmitry Kravkov96305232012-04-03 18:41:30 +00001985 if (config_hash) {
1986 /* RSS keys */
Akinobu Mita8376d0b2012-12-17 16:04:28 -08001987 prandom_bytes(params.rss_key, sizeof(params.rss_key));
Dmitry Kravkov96305232012-04-03 18:41:30 +00001988 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001989 }
1990
1991 return bnx2x_config_rss(bp, &params);
1992}
1993
Eric Dumazet1191cb82012-04-27 21:39:21 +00001994static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001995{
Yuval Mintz3b603062012-03-18 10:33:39 +00001996 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001997
1998 /* Prepare parameters for function state transitions */
1999 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2000
2001 func_params.f_obj = &bp->func_obj;
2002 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2003
2004 func_params.params.hw_init.load_phase = load_code;
2005
2006 return bnx2x_func_state_change(bp, &func_params);
2007}
2008
2009/*
 2010 * Cleans the objects that have internal lists without sending
 2011 * ramrods. Should be run when interrupts are disabled.
2012 */
2013static void bnx2x_squeeze_objects(struct bnx2x *bp)
2014{
2015 int rc;
2016 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002017 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002018 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002019
2020 /***************** Cleanup MACs' object first *************************/
2021
 2022 /* Wait for completion of the requested commands */
2023 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2024 /* Perform a dry cleanup */
2025 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2026
2027 /* Clean ETH primary MAC */
2028 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002029 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002030 &ramrod_flags);
2031 if (rc != 0)
2032 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2033
2034 /* Cleanup UC list */
2035 vlan_mac_flags = 0;
2036 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2037 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2038 &ramrod_flags);
2039 if (rc != 0)
2040 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2041
2042 /***************** Now clean mcast object *****************************/
2043 rparam.mcast_obj = &bp->mcast_obj;
2044 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2045
2046 /* Add a DEL command... */
2047 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2048 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002049 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2050 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002051
2052 /* ...and wait until all pending commands are cleared */
2053 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2054 while (rc != 0) {
2055 if (rc < 0) {
2056 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2057 rc);
2058 return;
2059 }
2060
2061 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2062 }
2063}
2064
2065#ifndef BNX2X_STOP_ON_ERROR
2066#define LOAD_ERROR_EXIT(bp, label) \
2067 do { \
2068 (bp)->state = BNX2X_STATE_ERROR; \
2069 goto label; \
2070 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002071
2072#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2073 do { \
2074 bp->cnic_loaded = false; \
2075 goto label; \
2076 } while (0)
2077#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002078#define LOAD_ERROR_EXIT(bp, label) \
2079 do { \
2080 (bp)->state = BNX2X_STATE_ERROR; \
2081 (bp)->panic = 1; \
2082 return -EBUSY; \
2083 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002084#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2085 do { \
2086 bp->cnic_loaded = false; \
2087 (bp)->panic = 1; \
2088 return -EBUSY; \
2089 } while (0)
2090#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002091
Ariel Eliorad5afc82013-01-01 05:22:26 +00002092static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002093{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002094 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2095 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2096 return;
2097}
Yuval Mintz452427b2012-03-26 20:47:07 +00002098
Ariel Eliorad5afc82013-01-01 05:22:26 +00002099static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2100{
Ariel Elior8db573b2013-01-01 05:22:37 +00002101 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002102 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002103
Ariel Eliorad5afc82013-01-01 05:22:26 +00002104 /* number of queues for statistics is number of eth queues + FCoE */
2105 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002106
Ariel Eliorad5afc82013-01-01 05:22:26 +00002107 /* Total number of FW statistics requests =
2108 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2109 * and fcoe l2 queue) stats + num of queues (which includes another 1
2110 * for fcoe l2 queue if applicable)
2111 */
2112 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2113
Ariel Elior8db573b2013-01-01 05:22:37 +00002114 /* vf stats appear in the request list, but their data is allocated by
2115 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2116 * it is used to determine where to place the vf stats queries in the
2117 * request struct
2118 */
2119 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002120 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002121
Ariel Eliorad5afc82013-01-01 05:22:26 +00002122 /* Request is built from stats_query_header and an array of
2123 * stats_query_cmd_group each of which contains
2124 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2125 * configured in the stats_query_header.
2126 */
2127 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002128 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2129 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002130 1 : 0));
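	/* i.e. a ceiling division: num_groups =
	 * DIV_ROUND_UP(fw_stats_num + vf_headroom, STATS_QUERY_CMD_COUNT).
	 * E.g. 12 pending requests with a (hypothetical) group size of 16
	 * would fit in one group, while 18 would need two.
	 */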
2131
Ariel Elior8db573b2013-01-01 05:22:37 +00002132 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2133 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002134 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2135 num_groups * sizeof(struct stats_query_cmd_group);
2136
2137 /* Data for statistics requests + stats_counter
2138 * stats_counter holds per-STORM counters that are incremented
2139 * when STORM has finished with the current request.
 2140 * memory for FCoE offloaded statistics is counted anyway,
2141 * even if they will not be sent.
2142 * VF stats are not accounted for here as the data of VF stats is stored
2143 * in memory allocated by the VF, not here.
2144 */
2145 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2146 sizeof(struct per_pf_stats) +
2147 sizeof(struct fcoe_statistics_params) +
2148 sizeof(struct per_queue_stats) * num_queue_stats +
2149 sizeof(struct stats_counter);
2150
2151 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2152 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2153
2154 /* Set shortcuts */
2155 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2156 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2157 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2158 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2159 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2160 bp->fw_stats_req_sz;
2161
 2162 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2163 U64_HI(bp->fw_stats_req_mapping),
2164 U64_LO(bp->fw_stats_req_mapping));
 2165 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2166 U64_HI(bp->fw_stats_data_mapping),
2167 U64_LO(bp->fw_stats_data_mapping));
2168 return 0;
2169
2170alloc_mem_err:
2171 bnx2x_free_fw_stats_mem(bp);
2172 BNX2X_ERR("Can't allocate FW stats memory\n");
2173 return -ENOMEM;
2174}
2175
2176/* send load request to mcp and analyze response */
2177static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2178{
2179 /* init fw_seq */
2180 bp->fw_seq =
2181 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2182 DRV_MSG_SEQ_NUMBER_MASK);
2183 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2184
2185 /* Get current FW pulse sequence */
2186 bp->fw_drv_pulse_wr_seq =
2187 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2188 DRV_PULSE_SEQ_MASK);
2189 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2190
2191 /* load request */
2192 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2193 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2194
2195 /* if mcp fails to respond we must abort */
2196 if (!(*load_code)) {
2197 BNX2X_ERR("MCP response failure, aborting\n");
2198 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002199 }
2200
Ariel Eliorad5afc82013-01-01 05:22:26 +00002201 /* If mcp refused (e.g. other port is in diagnostic mode) we
2202 * must abort
2203 */
2204 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2205 BNX2X_ERR("MCP refused load request, aborting\n");
2206 return -EBUSY;
2207 }
2208 return 0;
2209}
2210
 2211/* check whether another PF has already loaded FW to the chip. In
2212 * virtualized environments a pf from another VM may have already
2213 * initialized the device including loading FW
2214 */
2215int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2216{
2217 /* is another pf loaded on this engine? */
2218 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2219 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2220 /* build my FW version dword */
2221 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2222 (BCM_5710_FW_MINOR_VERSION << 8) +
2223 (BCM_5710_FW_REVISION_VERSION << 16) +
2224 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2225
2226 /* read loaded FW from chip */
2227 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2228
2229 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2230 loaded_fw, my_fw);
2231
2232 /* abort nic load if version mismatch */
2233 if (my_fw != loaded_fw) {
2234 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2235 loaded_fw, my_fw);
2236 return -EBUSY;
2237 }
2238 }
2239 return 0;
2240}
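/* Version dword layout, per the packing above: major in bits 0..7, minor
 * in 8..15, revision in 16..23, engineering in 24..31. A hypothetical
 * FW 7.8.2.0 would therefore pack to 0x00020807.
 */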
2241
2242/* returns the "mcp load_code" according to global load_count array */
2243static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2244{
2245 int path = BP_PATH(bp);
2246
2247 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2248 path, load_count[path][0], load_count[path][1],
2249 load_count[path][2]);
2250 load_count[path][0]++;
2251 load_count[path][1 + port]++;
2252 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2253 path, load_count[path][0], load_count[path][1],
2254 load_count[path][2]);
2255 if (load_count[path][0] == 1)
2256 return FW_MSG_CODE_DRV_LOAD_COMMON;
2257 else if (load_count[path][1 + port] == 1)
2258 return FW_MSG_CODE_DRV_LOAD_PORT;
2259 else
2260 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2261}
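/* Example (illustrative): on an idle path the first function to load sees
 * load_count[path][0] == 1 and gets LOAD_COMMON; the first function on a
 * given port gets LOAD_PORT; every later function on that port gets
 * LOAD_FUNCTION, mirroring the arbitration the MCP would normally do.
 */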
2262
2263/* mark PMF if applicable */
2264static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2265{
2266 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2267 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2268 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2269 bp->port.pmf = 1;
2270 /* We need the barrier to ensure the ordering between the
2271 * writing to bp->port.pmf here and reading it from the
2272 * bnx2x_periodic_task().
2273 */
2274 smp_mb();
2275 } else {
2276 bp->port.pmf = 0;
2277 }
2278
2279 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2280}
2281
2282static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2283{
2284 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2285 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2286 (bp->common.shmem2_base)) {
2287 if (SHMEM2_HAS(bp, dcc_support))
2288 SHMEM2_WR(bp, dcc_support,
2289 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2290 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2291 if (SHMEM2_HAS(bp, afex_driver_support))
2292 SHMEM2_WR(bp, afex_driver_support,
2293 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2294 }
2295
2296 /* Set AFEX default VLAN tag to an invalid value */
2297 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002298}
2299
Eric Dumazet1191cb82012-04-27 21:39:21 +00002300/**
2301 * bnx2x_bz_fp - zero content of the fastpath structure.
2302 *
2303 * @bp: driver handle
2304 * @index: fastpath index to be zeroed
2305 *
2306 * Makes sure the contents of the bp->fp[index].napi is kept
2307 * intact.
2308 */
2309static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2310{
2311 struct bnx2x_fastpath *fp = &bp->fp[index];
Barak Witkowski15192a82012-06-19 07:48:28 +00002312
Merav Sicron65565882012-06-19 07:48:26 +00002313 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002314 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002315 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002316 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002317 if (fp->tpa_info)
2318 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2319 sizeof(struct bnx2x_agg_info));
2320 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002321
2322 /* Restore the NAPI object as it has been already initialized */
2323 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002324 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002325 fp->bp = bp;
2326 fp->index = index;
2327 if (IS_ETH_FP(fp))
2328 fp->max_cos = bp->max_cos;
2329 else
2330 /* Special queues support only one CoS */
2331 fp->max_cos = 1;
2332
Merav Sicron65565882012-06-19 07:48:26 +00002333 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002334 if (IS_FCOE_FP(fp))
2335 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002336 if (IS_ETH_FP(fp))
2337 for_each_cos_in_tx_queue(fp, cos)
2338 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2339 BNX2X_NUM_ETH_QUEUES(bp) + index];
2340
Eric Dumazet1191cb82012-04-27 21:39:21 +00002341 /*
 2342 * set the tpa flag for each queue. The tpa flag determines the queue's
 2343 * minimal size, so it must be set prior to queue memory allocation
2344 */
2345 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2346 (bp->flags & GRO_ENABLE_FLAG &&
2347 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2348 if (bp->flags & TPA_ENABLE_FLAG)
2349 fp->mode = TPA_MODE_LRO;
2350 else if (bp->flags & GRO_ENABLE_FLAG)
2351 fp->mode = TPA_MODE_GRO;
2352
Eric Dumazet1191cb82012-04-27 21:39:21 +00002353 /* We don't want TPA on an FCoE L2 ring */
2354 if (IS_FCOE_FP(fp))
2355 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002356}
2357
2358int bnx2x_load_cnic(struct bnx2x *bp)
2359{
2360 int i, rc, port = BP_PORT(bp);
2361
2362 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2363
2364 mutex_init(&bp->cnic_mutex);
2365
Ariel Eliorad5afc82013-01-01 05:22:26 +00002366 if (IS_PF(bp)) {
2367 rc = bnx2x_alloc_mem_cnic(bp);
2368 if (rc) {
2369 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2370 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2371 }
Merav Sicron55c11942012-11-07 00:45:48 +00002372 }
2373
2374 rc = bnx2x_alloc_fp_mem_cnic(bp);
2375 if (rc) {
2376 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2377 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2378 }
2379
2380 /* Update the number of queues with the cnic queues */
2381 rc = bnx2x_set_real_num_queues(bp, 1);
2382 if (rc) {
2383 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2384 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2385 }
2386
2387 /* Add all CNIC NAPI objects */
2388 bnx2x_add_all_napi_cnic(bp);
2389 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2390 bnx2x_napi_enable_cnic(bp);
2391
2392 rc = bnx2x_init_hw_func_cnic(bp);
2393 if (rc)
2394 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2395
2396 bnx2x_nic_init_cnic(bp);
2397
Ariel Eliorad5afc82013-01-01 05:22:26 +00002398 if (IS_PF(bp)) {
2399 /* Enable Timer scan */
2400 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002401
Ariel Eliorad5afc82013-01-01 05:22:26 +00002402 /* setup cnic queues */
2403 for_each_cnic_queue(bp, i) {
2404 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2405 if (rc) {
2406 BNX2X_ERR("Queue setup failed\n");
2407 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2408 }
Merav Sicron55c11942012-11-07 00:45:48 +00002409 }
2410 }
2411
2412 /* Initialize Rx filter. */
2413 netif_addr_lock_bh(bp->dev);
2414 bnx2x_set_rx_mode(bp->dev);
2415 netif_addr_unlock_bh(bp->dev);
2416
2417 /* re-read iscsi info */
2418 bnx2x_get_iscsi_info(bp);
2419 bnx2x_setup_cnic_irq_info(bp);
2420 bnx2x_setup_cnic_info(bp);
2421 bp->cnic_loaded = true;
2422 if (bp->state == BNX2X_STATE_OPEN)
2423 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2424
2425
 2426 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2427
2428 return 0;
2429
2430#ifndef BNX2X_STOP_ON_ERROR
2431load_error_cnic2:
2432 /* Disable Timer scan */
2433 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2434
2435load_error_cnic1:
2436 bnx2x_napi_disable_cnic(bp);
2437 /* Update the number of queues without the cnic queues */
2438 rc = bnx2x_set_real_num_queues(bp, 0);
2439 if (rc)
2440 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2441load_error_cnic0:
2442 BNX2X_ERR("CNIC-related load failed\n");
2443 bnx2x_free_fp_mem_cnic(bp);
2444 bnx2x_free_mem_cnic(bp);
2445 return rc;
2446#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002447}
2448
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002449/* must be called with rtnl_lock */
2450int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2451{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002452 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002453 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002454
Merav Sicron55c11942012-11-07 00:45:48 +00002455 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2456 DP(NETIF_MSG_IFUP,
2457 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2458
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002459#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002460 if (unlikely(bp->panic)) {
2461 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002462 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002463 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002464#endif
2465
2466 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2467
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002468 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2469 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2470 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002471
Ariel Eliorad5afc82013-01-01 05:22:26 +00002472 if (IS_PF(bp))
2473 /* must be called before memory allocation and HW init */
2474 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002475
Ariel Elior6383c0b2011-07-14 08:31:57 +00002476 /*
 2477 * Zero fastpath structures while preserving invariants that are
 2478 * allocated only once: the napi struct, fp index, max_cos, bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002479 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002480 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002481 DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002482 for_each_queue(bp, i)
2483 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002484 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2485 bp->num_cnic_queues) *
2486 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002487
Merav Sicron55c11942012-11-07 00:45:48 +00002488 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002489
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002490 /* Set the receive queues buffer size */
2491 bnx2x_set_rx_buf_size(bp);
2492
Ariel Eliorad5afc82013-01-01 05:22:26 +00002493 if (IS_PF(bp)) {
2494 rc = bnx2x_alloc_mem(bp);
2495 if (rc) {
2496 BNX2X_ERR("Unable to allocate bp memory\n");
2497 return rc;
2498 }
2499 }
2500
2501 /* Allocated memory for FW statistics */
2502 if (bnx2x_alloc_fw_stats_mem(bp))
2503 LOAD_ERROR_EXIT(bp, load_error0);
2504
 2505 /* must be done after alloc mem, since it's self-adjusting to the amount
 2506 * of memory available for RSS queues
2507 */
2508 rc = bnx2x_alloc_fp_mem(bp);
2509 if (rc) {
2510 BNX2X_ERR("Unable to allocate memory for fps\n");
2511 LOAD_ERROR_EXIT(bp, load_error0);
2512 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002513
Ariel Elior8d9ac292013-01-01 05:22:27 +00002514 /* request pf to initialize status blocks */
2515 if (IS_VF(bp)) {
2516 rc = bnx2x_vfpf_init(bp);
2517 if (rc)
2518 LOAD_ERROR_EXIT(bp, load_error0);
2519 }
2520
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002521 /* Since bnx2x_alloc_mem() may update
 2522 * bp->num_queues, bnx2x_set_real_num_queues() must always
Merav Sicron55c11942012-11-07 00:45:48 +00002523 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002524 */
Merav Sicron55c11942012-11-07 00:45:48 +00002525 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002526 if (rc) {
2527 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002528 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002529 }
2530
Ariel Elior6383c0b2011-07-14 08:31:57 +00002531 /* configure multi-CoS mappings in the kernel.
 2532 * this configuration may be overridden by a multi-class queue discipline
2533 * or by a dcbx negotiation result.
2534 */
2535 bnx2x_setup_tc(bp->dev, bp->max_cos);
2536
Merav Sicron26614ba2012-08-27 03:26:19 +00002537 /* Add all NAPI objects */
2538 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002539 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002540 bnx2x_napi_enable(bp);
2541
Ariel Eliorad5afc82013-01-01 05:22:26 +00002542 if (IS_PF(bp)) {
2543 /* set pf load just before approaching the MCP */
2544 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002545
Ariel Eliorad5afc82013-01-01 05:22:26 +00002546 /* if mcp exists send load request and analyze response */
2547 if (!BP_NOMCP(bp)) {
2548 /* attempt to load pf */
2549 rc = bnx2x_nic_load_request(bp, &load_code);
2550 if (rc)
2551 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002552
Ariel Eliorad5afc82013-01-01 05:22:26 +00002553 /* what did mcp say? */
2554 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2555 if (rc) {
2556 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002557 LOAD_ERROR_EXIT(bp, load_error2);
2558 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002559 } else {
2560 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002561 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002562
Ariel Eliorad5afc82013-01-01 05:22:26 +00002563 /* mark pmf if applicable */
2564 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002565
Ariel Eliorad5afc82013-01-01 05:22:26 +00002566 /* Init Function state controlling object */
2567 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002568
Ariel Eliorad5afc82013-01-01 05:22:26 +00002569 /* Initialize HW */
2570 rc = bnx2x_init_hw(bp, load_code);
2571 if (rc) {
2572 BNX2X_ERR("HW init failed, aborting\n");
2573 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2574 LOAD_ERROR_EXIT(bp, load_error2);
2575 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002576 }
2577
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002578 /* Connect to IRQs */
2579 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002580 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002581 BNX2X_ERR("setup irqs failed\n");
2582 if (IS_PF(bp))
2583 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002584 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002585 }
2586
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002587 /* Setup NIC internals and enable interrupts */
2588 bnx2x_nic_init(bp, load_code);
2589
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002590 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002591 if (IS_PF(bp)) {
2592 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002593 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002594
Ariel Eliorad5afc82013-01-01 05:22:26 +00002595 /* Set AFEX default VLAN tag to an invalid value */
2596 bp->afex_def_vlan_tag = -1;
2597 bnx2x_nic_load_afex_dcc(bp, load_code);
2598 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2599 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002600 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002601 BNX2X_ERR("Function start failed!\n");
2602 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2603
Merav Sicron55c11942012-11-07 00:45:48 +00002604 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002605 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002606
Ariel Eliorad5afc82013-01-01 05:22:26 +00002607 /* Send LOAD_DONE command to MCP */
2608 if (!BP_NOMCP(bp)) {
2609 load_code = bnx2x_fw_command(bp,
2610 DRV_MSG_CODE_LOAD_DONE, 0);
2611 if (!load_code) {
2612 BNX2X_ERR("MCP response failure, aborting\n");
2613 rc = -EBUSY;
2614 LOAD_ERROR_EXIT(bp, load_error3);
2615 }
2616 }
2617
2618 /* setup the leading queue */
2619 rc = bnx2x_setup_leading(bp);
2620 if (rc) {
2621 BNX2X_ERR("Setup leading failed!\n");
2622 LOAD_ERROR_EXIT(bp, load_error3);
2623 }
2624
2625 /* set up the rest of the queues */
2626 for_each_nondefault_eth_queue(bp, i) {
2627 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2628 if (rc) {
2629 BNX2X_ERR("Queue setup failed\n");
2630 LOAD_ERROR_EXIT(bp, load_error3);
2631 }
2632 }
2633
2634 /* setup rss */
2635 rc = bnx2x_init_rss_pf(bp);
2636 if (rc) {
2637 BNX2X_ERR("PF RSS init failed\n");
2638 LOAD_ERROR_EXIT(bp, load_error3);
2639 }
Ariel Elior8d9ac292013-01-01 05:22:27 +00002640
2641 } else { /* vf */
2642 for_each_eth_queue(bp, i) {
2643 rc = bnx2x_vfpf_setup_q(bp, i);
2644 if (rc) {
2645 BNX2X_ERR("Queue setup failed\n");
2646 LOAD_ERROR_EXIT(bp, load_error3);
2647 }
2648 }
Merav Sicron51c1a582012-03-18 10:33:38 +00002649 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002650
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002651 /* Now when Clients are configured we are ready to work */
2652 bp->state = BNX2X_STATE_OPEN;
2653
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002654 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002655 if (IS_PF(bp))
2656 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002657 else /* vf */
2658 rc = bnx2x_vfpf_set_mac(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002659 if (rc) {
2660 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002661 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002662 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002663
Ariel Eliorad5afc82013-01-01 05:22:26 +00002664 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002665 bnx2x_update_max_mf_config(bp, bp->pending_max);
2666 bp->pending_max = 0;
2667 }
2668
Ariel Eliorad5afc82013-01-01 05:22:26 +00002669 if (bp->port.pmf) {
2670 rc = bnx2x_initial_phy_init(bp, load_mode);
2671 if (rc)
2672 LOAD_ERROR_EXIT(bp, load_error3);
2673 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002674 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002675
2676 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002677
2678 /* Initialize Rx filter. */
2679 netif_addr_lock_bh(bp->dev);
2680 bnx2x_set_rx_mode(bp->dev);
2681 netif_addr_unlock_bh(bp->dev);
2682
2683 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002684 switch (load_mode) {
2685 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002686		/* Tx queues should only be re-enabled */
2687 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002688 break;
2689
2690 case LOAD_OPEN:
2691 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002692 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002693 break;
2694
2695 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002696 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002697 bp->state = BNX2X_STATE_DIAG;
2698 break;
2699
2700 default:
2701 break;
2702 }
2703
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002704 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002705 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002706 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002707 bnx2x__link_status_update(bp);
2708
2709 /* start the timer */
2710 mod_timer(&bp->timer, jiffies + bp->current_interval);
2711
Merav Sicron55c11942012-11-07 00:45:48 +00002712 if (CNIC_ENABLED(bp))
2713 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002714
Ariel Eliorad5afc82013-01-01 05:22:26 +00002715 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2716 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002717 u32 val;
2718 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2719 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2720 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2721 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2722 }
2723
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002724 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002725 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002726 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002727 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002728 return -EBUSY;
2729 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002730
Barak Witkowski98768792012-06-19 07:48:31 +00002731 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2732 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2733 bnx2x_dcbx_init(bp, false);
2734
Merav Sicron55c11942012-11-07 00:45:48 +00002735	DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2736
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002737 return 0;
2738
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002739#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002740load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002741 if (IS_PF(bp)) {
2742 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002743
Ariel Eliorad5afc82013-01-01 05:22:26 +00002744 /* Clean queueable objects */
2745 bnx2x_squeeze_objects(bp);
2746 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002747
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002748 /* Free SKBs, SGEs, TPA pool and driver internals */
2749 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002750 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002751 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002752
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002753 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002754 bnx2x_free_irq(bp);
2755load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002756 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002757 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2759 }
2760
2761 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002762load_error1:
2763 bnx2x_napi_disable(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002764
Ariel Elior889b9af2012-01-26 06:01:51 +00002765 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002766 if (IS_PF(bp))
2767 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002768load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002769 bnx2x_free_fp_mem(bp);
2770 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002771 bnx2x_free_mem(bp);
2772
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002773 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002774#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002775}
2776
Ariel Eliorad5afc82013-01-01 05:22:26 +00002777static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2778{
2779	int rc = 0, cos, i;
2780
2781 /* Wait until tx fastpath tasks complete */
2782 for_each_tx_queue(bp, i) {
2783 struct bnx2x_fastpath *fp = &bp->fp[i];
2784
2785 for_each_cos_in_tx_queue(fp, cos)
2786 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2787 if (rc)
2788 return rc;
2789 }
2790 return 0;
2791}
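/* Illustrative note: with, say, 4 Tx queues and 2 CoS per queue the loop
 * above polls all 8 txdata rings in turn; bnx2x_clean_tx_queue() itself
 * waits (with a timeout) for each ring's consumer to catch up with its
 * producer, so a non-zero return means some ring failed to drain in time.
 */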
2792
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002793/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002794int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002795{
2796 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002797 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002798
Merav Sicron55c11942012-11-07 00:45:48 +00002799 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2800
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002801 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002802 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002803 u32 val;
2804 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2805 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2806 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2807 }
2808
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002809 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002810 (bp->state == BNX2X_STATE_CLOSED ||
2811 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002812 /* We can get here if the driver has been unloaded
2813 * during parity error recovery and is either waiting for a
2814 * leader to complete or for other functions to unload and
2815 * then ifdown has been issued. In this case we want to
2816 * unload and let other functions to complete a recovery
2817 * process.
2818 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002819 bp->recovery_state = BNX2X_RECOVERY_DONE;
2820 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002821 bnx2x_release_leader_lock(bp);
2822 smp_mb();
2823
Merav Sicron51c1a582012-03-18 10:33:38 +00002824 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2825 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002826 return -EINVAL;
2827 }
2828
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002829 /* Nothing to do during unload if previous bnx2x_nic_load()
2830	 * has not completed successfully - all resources are released.
2831 *
2832	 * We can get here only after an unsuccessful ndo_* callback, during which
2833 * dev->IFF_UP flag is still on.
2834 */
2835 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2836 return 0;
2837
2838	/* It's important to set the bp->state to a value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002839 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2840 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2841 */
2842 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2843 smp_mb();
2844
Merav Sicron55c11942012-11-07 00:45:48 +00002845 if (CNIC_LOADED(bp))
2846 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2847
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002848 /* Stop Tx */
2849 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002850 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002851
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002852 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002853
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002854 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002855
Ariel Eliorad5afc82013-01-01 05:22:26 +00002856 if (IS_PF(bp)) {
2857 /* Set ALWAYS_ALIVE bit in shmem */
2858 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2859 bnx2x_drv_pulse(bp);
2860 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2861 bnx2x_save_statistics(bp);
2862 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002863
Ariel Eliorad5afc82013-01-01 05:22:26 +00002864 /* wait till consumers catch up with producers in all queues */
2865 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002866
Ariel Elior9b176b62013-01-01 05:22:28 +00002867	/* if VF, indicate to the PF that this function is going down (the PF
2868	 * will delete the sp elements and clear initializations)
2869 */
2870 if (IS_VF(bp))
2871 bnx2x_vfpf_close_vf(bp);
2872 else if (unload_mode != UNLOAD_RECOVERY)
2873		/* if this is a normal/close unload, need to clean up the chip */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002874 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002875 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002876 /* Send the UNLOAD_REQUEST to the MCP */
2877 bnx2x_send_unload_req(bp, unload_mode);
2878
2879 /*
2880 * Prevent transactions to host from the functions on the
2881 * engine that doesn't reset global blocks in case of global
2882		 * attention once global blocks are reset and gates are opened
2883		 * (the engine whose leader will perform the recovery
2884 * last).
2885 */
2886 if (!CHIP_IS_E1x(bp))
2887 bnx2x_pf_disable(bp);
2888
2889 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002890 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002891 /* Delete all NAPI objects */
2892 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002893 if (CNIC_LOADED(bp))
2894 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002895 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002896 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002897
2898 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002899 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002900 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002901
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002902 /*
2903	 * At this stage no more interrupts will arrive, so we may safely clean
2904 * the queueable objects here in case they failed to get cleaned so far.
2905 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002906 if (IS_PF(bp))
2907 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002908
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002909 /* There should be no more pending SP commands at this stage */
2910 bp->sp_state = 0;
2911
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002912 bp->port.pmf = 0;
2913
2914 /* Free SKBs, SGEs, TPA pool and driver internals */
2915 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002916 if (CNIC_LOADED(bp))
2917 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002918 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002919 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002920
Ariel Eliorad5afc82013-01-01 05:22:26 +00002921 bnx2x_free_fp_mem(bp);
2922 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00002923 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002924
Ariel Eliorad5afc82013-01-01 05:22:26 +00002925 if (IS_PF(bp)) {
2926 bnx2x_free_mem(bp);
2927 if (CNIC_LOADED(bp))
2928 bnx2x_free_mem_cnic(bp);
2929 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002930 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00002931 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002932
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002933 /* Check if there are pending parity attentions. If there are - set
2934 * RECOVERY_IN_PROGRESS.
2935 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002936 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002937 bnx2x_set_reset_in_progress(bp);
2938
2939 /* Set RESET_IS_GLOBAL if needed */
2940 if (global)
2941 bnx2x_set_reset_global(bp);
2942 }
2943
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002945 /* The last driver must disable a "close the gate" if there is no
2946 * parity attention or "process kill" pending.
2947 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002948 if (IS_PF(bp) &&
2949 !bnx2x_clear_pf_load(bp) &&
2950 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002951 bnx2x_disable_close_the_gate(bp);
2952
Merav Sicron55c11942012-11-07 00:45:48 +00002953 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2954
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002955 return 0;
2956}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002957
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002958int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2959{
2960 u16 pmcsr;
2961
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002962 /* If there is no power capability, silently succeed */
2963 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002964 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002965 return 0;
2966 }
2967
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002968 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2969
2970 switch (state) {
2971 case PCI_D0:
2972 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2973 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2974 PCI_PM_CTRL_PME_STATUS));
2975
2976 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2977 /* delay required during transition out of D3hot */
2978 msleep(20);
2979 break;
2980
2981 case PCI_D3hot:
2982		/* If there are other clients above, don't
2983		 * shut down the power */
2984 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2985 return 0;
2986 /* Don't shut down the power for emulation and FPGA */
2987 if (CHIP_REV_IS_SLOW(bp))
2988 return 0;
2989
2990 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
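		/* 3 == D3hot in the PCI_PM_CTRL state field */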
2991 pmcsr |= 3;
2992
2993 if (bp->wol)
2994 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2995
2996 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2997 pmcsr);
2998
2999 /* No more memory access after this point until
3000 * device is brought back to D0.
3001 */
3002 break;
3003
3004 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003005 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003006 return -EINVAL;
3007 }
3008 return 0;
3009}
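/* A minimal usage sketch (hypothetical caller - the driver's real
 * suspend/resume hooks live elsewhere): detach the netdev first so no
 * further MMIO is issued, then drop to D3hot; PME is armed inside
 * bnx2x_set_power_state() when WoL is enabled.
 */
#if 0
static int example_suspend(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	netif_device_detach(dev);
	return bnx2x_set_power_state(bp, PCI_D3hot);
}
#endif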
3010
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003011/*
3012 * net_device service functions
3013 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003014int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003015{
3016 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003017 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003018 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3019 napi);
3020 struct bnx2x *bp = fp->bp;
3021
3022 while (1) {
3023#ifdef BNX2X_STOP_ON_ERROR
3024 if (unlikely(bp->panic)) {
3025 napi_complete(napi);
3026 return 0;
3027 }
3028#endif
3029
Ariel Elior6383c0b2011-07-14 08:31:57 +00003030 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003031 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3032 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003033
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003034 if (bnx2x_has_rx_work(fp)) {
3035 work_done += bnx2x_rx_int(fp, budget - work_done);
3036
3037 /* must not complete if we consumed full budget */
3038 if (work_done >= budget)
3039 break;
3040 }
3041
3042 /* Fall out from the NAPI loop if needed */
3043 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003044
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003045 /* No need to update SB for FCoE L2 ring as long as
3046 * it's connected to the default SB and the SB
3047 * has been updated when NAPI was scheduled.
3048 */
3049 if (IS_FCOE_FP(fp)) {
3050 napi_complete(napi);
3051 break;
3052 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003053 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003054 /* bnx2x_has_rx_work() reads the status block,
3055 * thus we need to ensure that status block indices
3056 * have been actually read (bnx2x_update_fpsb_idx)
3057 * prior to this check (bnx2x_has_rx_work) so that
3058 * we won't write the "newer" value of the status block
3059 * to IGU (if there was a DMA right after
3060 * bnx2x_has_rx_work and if there is no rmb, the memory
3061 * reading (bnx2x_update_fpsb_idx) may be postponed
3062 * to right before bnx2x_ack_sb). In this case there
3063 * will never be another interrupt until there is
3064 * another update of the status block, while there
3065 * is still unhandled work.
3066 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003067 rmb();
3068
3069 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3070 napi_complete(napi);
3071 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003072 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003073 "Update index to %d\n", fp->fp_hc_idx);
3074 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3075 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003076 IGU_INT_ENABLE, 1);
3077 break;
3078 }
3079 }
3080 }
3081
3082 return work_done;
3083}
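/* The loop above follows the usual NAPI contract: napi_complete() is
 * called only once both Rx and Tx work are verifiably absent, and the
 * IGU ack that re-enables the interrupt is issued only after that
 * re-check, so a status block update racing with completion costs at
 * worst one spurious interrupt rather than a stall.
 */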
3084
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003085/* we split the first BD into headers and data BDs
3086 * to ease the pain of our fellow microcode engineers;
3087 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003088 */
3089static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003090 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003091 struct sw_tx_bd *tx_buf,
3092 struct eth_tx_start_bd **tx_bd, u16 hlen,
3093 u16 bd_prod, int nbd)
3094{
3095 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3096 struct eth_tx_bd *d_tx_bd;
3097 dma_addr_t mapping;
3098 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3099
3100 /* first fix first BD */
3101 h_tx_bd->nbd = cpu_to_le16(nbd);
3102 h_tx_bd->nbytes = cpu_to_le16(hlen);
3103
Merav Sicron51c1a582012-03-18 10:33:38 +00003104 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3105 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003106
3107 /* now get a new data BD
3108 * (after the pbd) and fill it */
3109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003110 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003111
3112 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3113 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3114
3115 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3116 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3117 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3118
3119 /* this marks the BD as one that has no individual mapping */
3120 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3121
3122 DP(NETIF_MSG_TX_QUEUED,
3123 "TSO split data size is %d (%x:%x)\n",
3124 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3125
3126 /* update tx_bd */
3127 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3128
3129 return bd_prod;
3130}
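/* Worked example of the split above (illustrative numbers): a TSO skb
 * with a 200-byte linear part and hlen == 66 (ETH + IP + TCP with
 * options) becomes a 66-byte start BD plus a 134-byte data BD that
 * reuses the same DMA mapping at offset 66; the caller bumps nbd to
 * account for the extra BD.
 */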
3131
Yuval Mintz86564c32013-01-23 03:21:50 +00003132#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3133#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3134static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003135{
Yuval Mintz86564c32013-01-23 03:21:50 +00003136 __sum16 tsum = (__force __sum16) csum;
3137
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003138 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003139 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3140 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003141
3142 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003143 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3144 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003145
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003146 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003147}
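/* Reading of the fixup above: with fix == 2 the hardware summed two
 * extra bytes in front of the transport header, so
 * csum_partial(t_header - 2, 2, 0) recomputes exactly that contribution
 * and csum_sub() removes it; folding and inverting then yields the
 * 16-bit checksum over the transport header alone. fix < 0 is the
 * mirror case, where missed bytes are added back via csum_add().
 */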
3148
3149static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3150{
3151 u32 rc;
3152
3153 if (skb->ip_summed != CHECKSUM_PARTIAL)
3154 rc = XMIT_PLAIN;
3155
3156 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00003157 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003158 rc = XMIT_CSUM_V6;
3159 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3160 rc |= XMIT_CSUM_TCP;
3161
3162 } else {
3163 rc = XMIT_CSUM_V4;
3164 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3165 rc |= XMIT_CSUM_TCP;
3166 }
3167 }
3168
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00003169 if (skb_is_gso_v6(skb))
3170 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3171 else if (skb_is_gso(skb))
3172 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003173
3174 return rc;
3175}
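/* Examples of the flag combinations above: an IPv4 TCP packet with
 * CHECKSUM_PARTIAL yields XMIT_CSUM_V4 | XMIT_CSUM_TCP; the same packet
 * with GSO adds XMIT_GSO_V4; an IPv6 UDP packet needing csum offload
 * yields just XMIT_CSUM_V6 (no XMIT_CSUM_TCP - UDP is implied).
 */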
3176
3177#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3178/* check if packet requires linearization (packet is too fragmented)
3179 no need to check fragmentation if page size > 8K (there will be no
3180   violation of FW restrictions) */
3181static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3182 u32 xmit_type)
3183{
3184 int to_copy = 0;
3185 int hlen = 0;
3186 int first_bd_sz = 0;
3187
3188 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3189 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3190
3191 if (xmit_type & XMIT_GSO) {
3192 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3193 /* Check if LSO packet needs to be copied:
3194 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3195 int wnd_size = MAX_FETCH_BD - 3;
3196 /* Number of windows to check */
3197 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3198 int wnd_idx = 0;
3199 int frag_idx = 0;
3200 u32 wnd_sum = 0;
3201
3202 /* Headers length */
3203 hlen = (int)(skb_transport_header(skb) - skb->data) +
3204 tcp_hdrlen(skb);
3205
3206 /* Amount of data (w/o headers) on linear part of SKB*/
3207 first_bd_sz = skb_headlen(skb) - hlen;
3208
3209 wnd_sum = first_bd_sz;
3210
3211 /* Calculate the first sum - it's special */
3212 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3213 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003214 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003215
3216 /* If there was data on linear skb data - check it */
3217 if (first_bd_sz > 0) {
3218 if (unlikely(wnd_sum < lso_mss)) {
3219 to_copy = 1;
3220 goto exit_lbl;
3221 }
3222
3223 wnd_sum -= first_bd_sz;
3224 }
3225
3226 /* Others are easier: run through the frag list and
3227 check all windows */
3228 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3229 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003230 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003231
3232 if (unlikely(wnd_sum < lso_mss)) {
3233 to_copy = 1;
3234 break;
3235 }
3236 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003237 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003238 }
3239 } else {
3240			/* in the non-LSO case a too fragmented packet should always
3241 be linearized */
3242 to_copy = 1;
3243 }
3244 }
3245
3246exit_lbl:
3247 if (unlikely(to_copy))
3248 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003249 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003250 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3251 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3252
3253 return to_copy;
3254}
3255#endif
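/* Worked example of the window check above, assuming MAX_FETCH_BD is 13
 * (the HW per-packet BD fetch limit): wnd_size is then 10, so every run
 * of 10 consecutive BDs (linear part plus frags, headers excluded) must
 * carry at least gso_size bytes; if any window falls short, one MSS of
 * payload would span more BDs than the chip can fetch at once, and the
 * skb is linearized instead.
 */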
3256
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003257static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3258 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003259{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003260 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3261 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3262 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003263 if ((xmit_type & XMIT_GSO_V6) &&
3264 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003265 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003266}
3267
3268/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003269 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003270 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003271 * @skb: packet skb
3272 * @pbd: parse BD
3273 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003274 */
3275static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3276 struct eth_tx_parse_bd_e1x *pbd,
3277 u32 xmit_type)
3278{
3279 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003280 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003281 pbd->tcp_flags = pbd_tcp_flags(skb);
3282
3283 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003284 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003285 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003286 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3287 ip_hdr(skb)->daddr,
3288 0, IPPROTO_TCP, 0));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003289
3290 } else
3291 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003292 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3293 &ipv6_hdr(skb)->daddr,
3294 0, IPPROTO_TCP, 0));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003295
Yuval Mintz86564c32013-01-23 03:21:50 +00003296 pbd->global_data |=
3297 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003298}
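/* The pseudo checksum seeded above deliberately uses a zero length
 * (csum_tcpudp_magic(saddr, daddr, 0, ...)): the FW patches in the real
 * TCP length of every segment it generates, which is exactly what the
 * PSEUDO_CS_WITHOUT_LEN flag set in global_data advertises.
 */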
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003299
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003300/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003301 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003302 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003303 * @bp: driver handle
3304 * @skb: packet skb
3305 * @parsing_data: data to be updated
3306 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003307 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003308 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003309 */
3310static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Yuval Mintz2de67432013-01-23 03:21:43 +00003311 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003312{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003313 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003314 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3315 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3316 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003317
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003318 if (xmit_type & XMIT_CSUM_TCP) {
3319 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3320 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3321 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003322
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003323 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003324 }
3325 /* We support checksum offload for TCP and UDP only.
3326 * No need to pass the UDP header length - it's a constant.
3327 */
3328 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003329}
3330
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003331static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3332 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3333{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003334 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3335
3336 if (xmit_type & XMIT_CSUM_V4)
3337 tx_start_bd->bd_flags.as_bitfield |=
3338 ETH_TX_BD_FLAGS_IP_CSUM;
3339 else
3340 tx_start_bd->bd_flags.as_bitfield |=
3341 ETH_TX_BD_FLAGS_IPV6;
3342
3343 if (!(xmit_type & XMIT_CSUM_TCP))
3344 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003345}
3346
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003347/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003348 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003349 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003350 * @bp: driver handle
3351 * @skb: packet skb
3352 * @pbd: parse BD to be updated
3353 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003354 */
3355static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3356 struct eth_tx_parse_bd_e1x *pbd,
3357 u32 xmit_type)
3358{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003359 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003360
3361 /* for now NS flag is not used in Linux */
3362 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003363 cpu_to_le16(hlen |
3364 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3365 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003366
3367 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003368 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003369
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003370 hlen += pbd->ip_hlen_w;
3371
3372 /* We support checksum offload for TCP and UDP only */
3373 if (xmit_type & XMIT_CSUM_TCP)
3374 hlen += tcp_hdrlen(skb) / 2;
3375 else
3376 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003377
3378 pbd->total_hlen_w = cpu_to_le16(hlen);
3379 hlen = hlen*2;
3380
3381 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003382 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003383
3384 } else {
3385 s8 fix = SKB_CS_OFF(skb); /* signed! */
3386
3387 DP(NETIF_MSG_TX_QUEUED,
3388 "hlen %d fix %d csum before fix %x\n",
3389 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3390
3391 /* HW bug: fixup the CSUM */
3392 pbd->tcp_pseudo_csum =
3393 bnx2x_csum_fix(skb_transport_header(skb),
3394 SKB_CS(skb), fix);
3395
3396 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3397 pbd->tcp_pseudo_csum);
3398 }
3399
3400 return hlen;
3401}
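/* Worked example of the header math above: plain Ethernet + IPv4 + TCP
 * with no options gives hlen = 14/2 = 7 words for the MAC header,
 * ip_hlen_w = 20/2 = 10, plus 20/2 = 10 more for TCP, so total_hlen_w
 * is 27 words and the function returns 54 bytes - PBD sizes really are
 * in 16-bit words.
 */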
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003402
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003403/* called with netif_tx_lock
3404 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3405 * netif_wake_queue()
3406 */
3407netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3408{
3409 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003410
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003411 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003412 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003413 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003414 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003415 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003416 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003417 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003418 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003419 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003420 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003421 dma_addr_t mapping;
3422 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3423 int i;
3424 u8 hlen = 0;
3425 __le16 pkt_size = 0;
3426 struct ethhdr *eth;
3427 u8 mac_type = UNICAST_ADDRESS;
3428
3429#ifdef BNX2X_STOP_ON_ERROR
3430 if (unlikely(bp->panic))
3431 return NETDEV_TX_BUSY;
3432#endif
3433
Ariel Elior6383c0b2011-07-14 08:31:57 +00003434 txq_index = skb_get_queue_mapping(skb);
3435 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003436
Merav Sicron55c11942012-11-07 00:45:48 +00003437 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003438
Merav Sicron65565882012-06-19 07:48:26 +00003439 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003440
3441 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003442 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003443 txq_index, fp_index, txdata_index); */
3444
Ariel Elior6383c0b2011-07-14 08:31:57 +00003445	/* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003446 DP(NETIF_MSG_TX_QUEUED,
3447 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003448 txdata->cid, fp_index, txdata_index, txdata, fp); */
3449
3450 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003451 skb_shinfo(skb)->nr_frags +
3452 BDS_PER_TX_PKT +
3453 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003454 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003455 if (txdata->tx_ring_size == 0) {
3456 struct bnx2x_eth_q_stats *q_stats =
3457 bnx2x_fp_qstats(bp, txdata->parent_fp);
3458 q_stats->driver_filtered_tx_pkt++;
3459 dev_kfree_skb(skb);
3460 return NETDEV_TX_OK;
3461 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003462 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3463 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003464 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003465
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003466 return NETDEV_TX_BUSY;
3467 }
3468
Merav Sicron51c1a582012-03-18 10:33:38 +00003469 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003470 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003471 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003472 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3473 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003474
3475 eth = (struct ethhdr *)skb->data;
3476
3477 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3478 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3479 if (is_broadcast_ether_addr(eth->h_dest))
3480 mac_type = BROADCAST_ADDRESS;
3481 else
3482 mac_type = MULTICAST_ADDRESS;
3483 }
3484
3485#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3486 /* First, check if we need to linearize the skb (due to FW
3487 restrictions). No need to check fragmentation if page size > 8K
3488 (there will be no violation to FW restrictions) */
3489 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3490 /* Statistics of linearization */
3491 bp->lin_cnt++;
3492 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003493 DP(NETIF_MSG_TX_QUEUED,
3494 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003495 dev_kfree_skb_any(skb);
3496 return NETDEV_TX_OK;
3497 }
3498 }
3499#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003500 /* Map skb linear data for DMA */
3501 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3502 skb_headlen(skb), DMA_TO_DEVICE);
3503 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003504 DP(NETIF_MSG_TX_QUEUED,
3505 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003506 dev_kfree_skb_any(skb);
3507 return NETDEV_TX_OK;
3508 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003509 /*
3510 Please read carefully. First we use one BD which we mark as start,
3511 then we have a parsing info BD (used for TSO or xsum),
3512 and only then we have the rest of the TSO BDs.
3513 (don't forget to mark the last one as last,
3514 and to unmap only AFTER you write to the BD ...)
3515	   And above all, all pbd sizes are in words - NOT DWORDS!
3516 */
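	/* Concrete BD sequence for the scheme above (illustrative): a TSO
	 * skb with split headers and 3 frags yields start BD (headers),
	 * parse BD, one split data BD and 3 frag BDs. nbd starts at 2
	 * (start + parse), bnx2x_tx_split() bumps it to 3, and each mapped
	 * frag adds one, so first_bd->nbd ends up 6 before the doorbell
	 * accounting for a crossed BD page boundary.
	 */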
3517
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003518 /* get current pkt produced now - advance it just before sending packet
3519 * since mapping of pages may fail and cause packet to be dropped
3520 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003521 pkt_prod = txdata->tx_pkt_prod;
3522 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003523
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003524 /* get a tx_buf and first BD
3525 * tx_start_bd may be changed during SPLIT,
3526 * but first_bd will always stay first
3527 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003528 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3529 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003530 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003531
3532 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003533 SET_FLAG(tx_start_bd->general_data,
3534 ETH_TX_START_BD_PARSE_NBDS,
3535 0);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003536
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003537 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003538 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003539
3540 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003541 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003542 tx_buf->skb = skb;
3543 tx_buf->flags = 0;
3544
3545 DP(NETIF_MSG_TX_QUEUED,
3546 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003547 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003548
Jesse Grosseab6d182010-10-20 13:56:03 +00003549 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003550 tx_start_bd->vlan_or_ethertype =
3551 cpu_to_le16(vlan_tx_tag_get(skb));
3552 tx_start_bd->bd_flags.as_bitfield |=
3553 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003554 } else {
3555 /* when transmitting in a vf, start bd must hold the ethertype
3556 * for fw to enforce it
3557 */
Yuval Mintz823e1d92013-01-14 05:11:47 +00003558#ifndef BNX2X_STOP_ON_ERROR
Ariel Eliordc1ba592013-01-01 05:22:30 +00003559 if (IS_VF(bp)) {
Yuval Mintz823e1d92013-01-14 05:11:47 +00003560#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003561 tx_start_bd->vlan_or_ethertype =
3562 cpu_to_le16(ntohs(eth->h_proto));
Yuval Mintz823e1d92013-01-14 05:11:47 +00003563#ifndef BNX2X_STOP_ON_ERROR
Ariel Eliordc1ba592013-01-01 05:22:30 +00003564 } else {
3565 /* used by FW for packet accounting */
3566 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3567 }
Yuval Mintz823e1d92013-01-14 05:11:47 +00003568#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003569 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003570
3571 /* turn on parsing and get a BD */
3572 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003573
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003574 if (xmit_type & XMIT_CSUM)
3575 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003576
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003577 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003578 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003579 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3580 /* Set PBD in checksum offload case */
3581 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003582 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3583 &pbd_e2_parsing_data,
3584 xmit_type);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003585
3586 if (IS_MF_SI(bp) || IS_VF(bp)) {
3587 /* fill in the MAC addresses in the PBD - for local
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003588 * switching
3589 */
3590 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3591 &pbd_e2->src_mac_addr_mid,
3592 &pbd_e2->src_mac_addr_lo,
3593 eth->h_source);
3594 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3595 &pbd_e2->dst_mac_addr_mid,
3596 &pbd_e2->dst_mac_addr_lo,
3597 eth->h_dest);
3598 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003599
3600 SET_FLAG(pbd_e2_parsing_data,
3601 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003602 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003603 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003604 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003605 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3606 /* Set PBD in checksum offload case */
3607 if (xmit_type & XMIT_CSUM)
3608 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003609
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003610 SET_FLAG(global_data,
3611 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3612 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003613 }
3614
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003615 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003616 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3617 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003618 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003619 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3620 pkt_size = tx_start_bd->nbytes;
3621
Merav Sicron51c1a582012-03-18 10:33:38 +00003622 DP(NETIF_MSG_TX_QUEUED,
3623 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003624 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3625 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003626 tx_start_bd->bd_flags.as_bitfield,
3627 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003628
3629 if (xmit_type & XMIT_GSO) {
3630
3631 DP(NETIF_MSG_TX_QUEUED,
3632 "TSO packet len %d hlen %d total len %d tso size %d\n",
3633 skb->len, hlen, skb_headlen(skb),
3634 skb_shinfo(skb)->gso_size);
3635
3636 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3637
3638 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003639 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3640 &tx_start_bd, hlen,
3641 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003642 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003643 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3644 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003645 else
3646 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003647 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003648
3649 /* Set the PBD's parsing_data field if not zero
3650 * (for the chips newer than 57711).
3651 */
3652 if (pbd_e2_parsing_data)
3653 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3654
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003655 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3656
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003657 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003658 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3659 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3660
Eric Dumazet9e903e02011-10-18 21:00:24 +00003661 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3662 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003663 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003664 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003665
Merav Sicron51c1a582012-03-18 10:33:38 +00003666 DP(NETIF_MSG_TX_QUEUED,
3667 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003668
3669 /* we need unmap all buffers already mapped
3670 * for this SKB;
3671 * first_bd->nbd need to be properly updated
3672 * before call to bnx2x_free_tx_pkt
3673 */
3674 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003675 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003676 TX_BD(txdata->tx_pkt_prod),
3677 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003678 return NETDEV_TX_OK;
3679 }
3680
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003681 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003682 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003683 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003684 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003685
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003686 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3687 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003688 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3689 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003690 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003691
3692 DP(NETIF_MSG_TX_QUEUED,
3693 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3694 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3695 le16_to_cpu(tx_data_bd->nbytes));
3696 }
3697
3698 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3699
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003700 /* update with actual num BDs */
3701 first_bd->nbd = cpu_to_le16(nbd);
3702
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003703 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3704
3705 /* now send a tx doorbell, counting the next BD
3706 * if the packet contains or ends with it
3707 */
3708 if (TX_BD_POFF(bd_prod) < nbd)
3709 nbd++;
3710
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003711 /* total_pkt_bytes should be set on the first data BD if
3712 * it's not an LSO packet and there is more than one
3713 * data BD. In this case pkt_size is limited by an MTU value.
3714 * However we prefer to set it for an LSO packet (while we don't
3715	 * have to) in order to save some CPU cycles in a non-LSO
3716	 * case, when we care much more about them.
3717 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003718 if (total_pkt_bd != NULL)
3719 total_pkt_bd->total_pkt_bytes = pkt_size;
3720
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003721 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003722 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003723 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003724 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3725 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3726 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3727 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003728 if (pbd_e2)
3729 DP(NETIF_MSG_TX_QUEUED,
3730 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3731 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3732 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3733 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3734 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003735 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3736
Tom Herbert2df1a702011-11-28 16:33:37 +00003737 netdev_tx_sent_queue(txq, skb->len);
3738
Willem de Bruijn8373c572012-04-27 09:04:06 +00003739 skb_tx_timestamp(skb);
3740
Ariel Elior6383c0b2011-07-14 08:31:57 +00003741 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003742 /*
3743 * Make sure that the BD data is updated before updating the producer
3744 * since FW might read the BD right after the producer is updated.
3745 * This is only applicable for weak-ordered memory model archs such
3746	 * as IA-64. The following barrier is also mandatory since FW
3747	 * assumes packets must have BDs.
3748 */
3749 wmb();
3750
Ariel Elior6383c0b2011-07-14 08:31:57 +00003751 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003752 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003753
Ariel Elior6383c0b2011-07-14 08:31:57 +00003754 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003755
3756 mmiowb();
3757
Ariel Elior6383c0b2011-07-14 08:31:57 +00003758 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003759
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003760 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003761 netif_tx_stop_queue(txq);
3762
3763 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3764 * ordering of set_bit() in netif_tx_stop_queue() and read of
3765 * fp->bd_tx_cons */
3766 smp_mb();
3767
Barak Witkowski15192a82012-06-19 07:48:28 +00003768 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003769 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003770 netif_tx_wake_queue(txq);
3771 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003772 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003773
3774 return NETDEV_TX_OK;
3775}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003776
Ariel Elior6383c0b2011-07-14 08:31:57 +00003777/**
3778 * bnx2x_setup_tc - routine to configure net_device for multi tc
3779 *
3780 * @netdev: net device to configure
3781 * @tc: number of traffic classes to enable
3782 *
3783 * callback connected to the ndo_setup_tc function pointer
3784 */
3785int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3786{
3787 int cos, prio, count, offset;
3788 struct bnx2x *bp = netdev_priv(dev);
3789
3790 /* setup tc must be called under rtnl lock */
3791 ASSERT_RTNL();
3792
3793	/* no traffic classes requested - aborting */
3794 if (!num_tc) {
3795 netdev_reset_tc(dev);
3796 return 0;
3797 }
3798
3799 /* requested to support too many traffic classes */
3800 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003801 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3802 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003803 return -EINVAL;
3804 }
3805
3806 /* declare amount of supported traffic classes */
3807 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003808 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003809 return -EINVAL;
3810 }
3811
3812 /* configure priority to traffic class mapping */
3813 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3814 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003815 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3816 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003817 prio, bp->prio_to_cos[prio]);
3818 }
3819
3820
3821	/* Use this configuration to differentiate tc0 from other COSes.
3822	   This can be used for ets or pfc, and saves the effort of setting
3823	   up a multi-class queue disc or negotiating DCBX with a switch
3824 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003825 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003826 for (prio = 1; prio < 16; prio++) {
3827 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003828 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003829 } */
3830
3831 /* configure traffic class to transmission queue mapping */
3832 for (cos = 0; cos < bp->max_cos; cos++) {
3833 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003834 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003835 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003836 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3837 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003838 cos, offset, count);
3839 }
3840
3841 return 0;
3842}
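/* Example of the resulting mapping (illustrative): with num_tc == 2 and
 * 8 ETH queues, priorities map to tc0/tc1 via bp->prio_to_cos, tc0 is
 * served by queues 0-7 (offset 0) and tc1 by queues 8-15 (offset
 * 1 * BNX2X_NUM_NON_CNIC_QUEUES(bp)) - each CoS owns a full copy of the
 * ETH queue range.
 */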
3843
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003844/* called with rtnl_lock */
3845int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3846{
3847 struct sockaddr *addr = p;
3848 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003849 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003850
Merav Sicron51c1a582012-03-18 10:33:38 +00003851 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3852 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003853 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003854 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003855
Barak Witkowskia3348722012-04-23 03:04:46 +00003856 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3857 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003858 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003859 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003860 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003861
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003862 if (netif_running(dev)) {
3863 rc = bnx2x_set_eth_mac(bp, false);
3864 if (rc)
3865 return rc;
3866 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003867
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003868 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3869
3870 if (netif_running(dev))
3871 rc = bnx2x_set_eth_mac(bp, true);
3872
3873 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003874}
3875
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003876static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3877{
3878 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3879 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003880 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003881
3882 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00003883
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003884 if (IS_FCOE_IDX(fp_index)) {
3885 memset(sb, 0, sizeof(union host_hc_status_block));
3886 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003887 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003888 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003889 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003890 BNX2X_PCI_FREE(sb->e2_sb,
3891 bnx2x_fp(bp, fp_index,
3892 status_blk_mapping),
3893 sizeof(struct host_hc_status_block_e2));
3894 else
3895 BNX2X_PCI_FREE(sb->e1x_sb,
3896 bnx2x_fp(bp, fp_index,
3897 status_blk_mapping),
3898 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003899 }
Merav Sicron55c11942012-11-07 00:45:48 +00003900
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003901 /* Rx */
3902 if (!skip_rx_queue(bp, fp_index)) {
3903 bnx2x_free_rx_bds(fp);
3904
3905 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3906 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3907 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3908 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3909 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3910
3911 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3912 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3913 sizeof(struct eth_fast_path_rx_cqe) *
3914 NUM_RCQ_BD);
3915
3916 /* SGE ring */
3917 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3918 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3919 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3920 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3921 }
3922
3923 /* Tx */
3924 if (!skip_tx_queue(bp, fp_index)) {
3925 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003926 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003927 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003928
Merav Sicron51c1a582012-03-18 10:33:38 +00003929 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003930 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003931 fp_index, cos, txdata->cid);
3932
3933 BNX2X_FREE(txdata->tx_buf_ring);
3934 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3935 txdata->tx_desc_mapping,
3936 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3937 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003938 }
3939 /* end of fastpath */
3940}
3941
Merav Sicron55c11942012-11-07 00:45:48 +00003942void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3943{
3944 int i;
3945 for_each_cnic_queue(bp, i)
3946 bnx2x_free_fp_mem_at(bp, i);
3947}
3948
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003949void bnx2x_free_fp_mem(struct bnx2x *bp)
3950{
3951 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00003952 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003953 bnx2x_free_fp_mem_at(bp, i);
3954}
3955
Eric Dumazet1191cb82012-04-27 21:39:21 +00003956static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003957{
3958 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003959 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003960 bnx2x_fp(bp, index, sb_index_values) =
3961 (__le16 *)status_blk.e2_sb->sb.index_values;
3962 bnx2x_fp(bp, index, sb_running_index) =
3963 (__le16 *)status_blk.e2_sb->sb.running_index;
3964 } else {
3965 bnx2x_fp(bp, index, sb_index_values) =
3966 (__le16 *)status_blk.e1x_sb->sb.index_values;
3967 bnx2x_fp(bp, index, sb_running_index) =
3968 (__le16 *)status_blk.e1x_sb->sb.running_index;
3969 }
3970}
3971
Eric Dumazet1191cb82012-04-27 21:39:21 +00003972/* Returns the number of actually allocated BDs */
3973static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3974 int rx_ring_size)
3975{
3976 struct bnx2x *bp = fp->bp;
3977 u16 ring_prod, cqe_ring_prod;
3978 int i, failure_cnt = 0;
3979
3980 fp->rx_comp_cons = 0;
3981 cqe_ring_prod = ring_prod = 0;
3982
3983	/* This routine is called only during init, so
3984 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3985 */
3986 for (i = 0; i < rx_ring_size; i++) {
3987 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3988 failure_cnt++;
3989 continue;
3990 }
3991 ring_prod = NEXT_RX_IDX(ring_prod);
3992 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3993 WARN_ON(ring_prod <= (i - failure_cnt));
3994 }
3995
3996 if (failure_cnt)
3997 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3998 i - failure_cnt, fp->index);
3999
4000 fp->rx_bd_prod = ring_prod;
4001 /* Limit the CQE producer by the CQE ring size */
4002 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4003 cqe_ring_prod);
4004 fp->rx_pkt = fp->rx_calls = 0;
4005
Barak Witkowski15192a82012-06-19 07:48:28 +00004006 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004007
4008 return i - failure_cnt;
4009}
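
/* Illustrative sketch (not part of the driver): the failure-tolerant fill
 * loop of bnx2x_alloc_rx_bds() above, modeled with a hypothetical
 * alloc_one() callback. The producer only advances on success, so the ring
 * never contains holes and 'i - failure_cnt' is the number of BDs that
 * actually got a buffer.
 */
static int example_fill_ring(int ring_size, int (*alloc_one)(int idx))
{
	int i, prod = 0, failure_cnt = 0;

	for (i = 0; i < ring_size; i++) {
		if (alloc_one(prod) < 0) {
			failure_cnt++;
			continue;	/* producer stays put */
		}
		prod++;			/* real code: NEXT_RX_IDX(prod) */
	}

	return i - failure_cnt;		/* BDs actually allocated */
}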
4010
4011static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4012{
4013 int i;
4014
4015 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4016 struct eth_rx_cqe_next_page *nextpg;
4017
4018 nextpg = (struct eth_rx_cqe_next_page *)
4019 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4020 nextpg->addr_hi =
4021 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4022 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4023 nextpg->addr_lo =
4024 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4025 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4026 }
4027}
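
/* Illustrative sketch (not part of the driver): the page-chaining rule
 * implemented above. The last descriptor of each CQE page carries the DMA
 * address of the following page, and the last page wraps back to the
 * first, turning NUM_RCQ_RINGS contiguous pages into one logical ring.
 */
static unsigned long example_next_page_addr(unsigned long base,
					    unsigned long page_size,
					    int page, int num_pages)
{
	return base + page_size * ((page + 1) % num_pages);
}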
4028
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004029static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4030{
4031 union host_hc_status_block *sb;
4032 struct bnx2x_fastpath *fp = &bp->fp[index];
4033 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004034 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004035 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004036
Barak Witkowskia3348722012-04-23 03:04:46 +00004037 if (!bp->rx_ring_size &&
4038 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004039 rx_ring_size = MIN_RX_SIZE_NONTPA;
4040 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004041 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004042 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4043
Yuval Mintz065f8b92012-10-03 04:22:59 +00004044 if (CHIP_IS_E3(bp)) {
4045 u32 cfg = SHMEM_RD(bp,
4046 dev_info.port_hw_config[BP_PORT(bp)].
4047 default_cfg);
4048
4049 /* Decrease ring size for 1G functions */
4050 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4051 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4052 rx_ring_size /= 10;
4053 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004054
David S. Miller8decf862011-09-22 03:23:13 -04004055		/* allocate at least the number of buffers required by FW */
4056 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4057 MIN_RX_SIZE_TPA, rx_ring_size);
4058
4059 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004060 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004061 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004062
Yuval Mintz04c46732013-01-23 03:21:46 +00004063 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4064
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004065 /* Common */
4066 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004067
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004068 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004069 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004070 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004071 BNX2X_PCI_ALLOC(sb->e2_sb,
4072 &bnx2x_fp(bp, index, status_blk_mapping),
4073 sizeof(struct host_hc_status_block_e2));
4074 else
4075 BNX2X_PCI_ALLOC(sb->e1x_sb,
4076 &bnx2x_fp(bp, index, status_blk_mapping),
4077 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004078 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004079
4080 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4081 * set shortcuts for it.
4082 */
4083 if (!IS_FCOE_IDX(index))
4084 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004085
4086 /* Tx */
4087 if (!skip_tx_queue(bp, index)) {
4088 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004089 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004090 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004091
Merav Sicron51c1a582012-03-18 10:33:38 +00004092 DP(NETIF_MSG_IFUP,
4093 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004094 index, cos);
4095
4096 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004097 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004098 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4099 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004100 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004101 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004102 }
4103
4104 /* Rx */
4105 if (!skip_rx_queue(bp, index)) {
4106 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4107 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4108 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4109 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4110 &bnx2x_fp(bp, index, rx_desc_mapping),
4111 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4112
4113 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4114 &bnx2x_fp(bp, index, rx_comp_mapping),
4115 sizeof(struct eth_fast_path_rx_cqe) *
4116 NUM_RCQ_BD);
4117
4118 /* SGE ring */
4119 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4120 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4121 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4122 &bnx2x_fp(bp, index, rx_sge_mapping),
4123 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4124 /* RX BD ring */
4125 bnx2x_set_next_page_rx_bd(fp);
4126
4127 /* CQ ring */
4128 bnx2x_set_next_page_rx_cq(fp);
4129
4130 /* BDs */
4131 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4132 if (ring_size < rx_ring_size)
4133 goto alloc_mem_err;
4134 }
4135
4136 return 0;
4137
4138/* handles low memory cases */
4139alloc_mem_err:
4140 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4141 index, ring_size);
4142	/* FW will drop all packets if the queue is not big enough;
4143	 * in these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004144 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004145 */
4146 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004147 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004148 /* release memory allocated for this queue */
4149 bnx2x_free_fp_mem_at(bp, index);
4150 return -ENOMEM;
4151 }
4152 return 0;
4153}
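
/* Illustrative sketch (not part of the driver): the rx ring sizing policy
 * of bnx2x_alloc_fp_mem_at() above. The constants below are made-up
 * stand-ins for MAX_RX_AVAIL and the MIN_RX_SIZE_* limits; the real values
 * live in the driver headers. The available budget is split across the rx
 * queues, shrunk for 1G SerDes (SGMII) functions on E3, and clamped from
 * below to the FW-required minimum.
 */
static int example_rx_ring_size(int num_rx_queues, int is_1g_serdes,
				int disable_tpa)
{
	int max_rx_avail = 4096;	/* assumed stand-in for MAX_RX_AVAIL */
	int min_tpa = 72;		/* assumed MIN_RX_SIZE_TPA */
	int min_nontpa = 126;		/* assumed MIN_RX_SIZE_NONTPA */
	int fw_min = disable_tpa ? min_nontpa : min_tpa;
	int size = max_rx_avail / num_rx_queues;

	if (is_1g_serdes)
		size /= 10;	/* decrease ring size for 1G functions */

	return size > fw_min ? size : fw_min;
}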
4154
Merav Sicron55c11942012-11-07 00:45:48 +00004155int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004156{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004157 if (!NO_FCOE(bp))
4158 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004159 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004160		/* we will fail the load process instead of marking
4161		 * NO_FCOE_FLAG
4162 */
4163 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004164
4165 return 0;
4166}
4167
4168int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4169{
4170 int i;
4171
4172 /* 1. Allocate FP for leading - fatal if error
4173 * 2. Allocate RSS - fix number of queues if error
4174 */
4175
4176 /* leading */
4177 if (bnx2x_alloc_fp_mem_at(bp, 0))
4178 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004179
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004180 /* RSS */
4181 for_each_nondefault_eth_queue(bp, i)
4182 if (bnx2x_alloc_fp_mem_at(bp, i))
4183 break;
4184
4185 /* handle memory failures */
4186 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4187 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4188
4189 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004190 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004191 if (CNIC_SUPPORT(bp))
4192		/* move non-eth FPs next to the last eth FP
4193 * must be done in that order
4194 * FCOE_IDX < FWD_IDX < OOO_IDX
4195 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004196
Merav Sicron55c11942012-11-07 00:45:48 +00004197			/* move FCoE fp even if NO_FCOE_FLAG is on */
4198 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4199 bp->num_ethernet_queues -= delta;
4200 bp->num_queues = bp->num_ethernet_queues +
4201 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004202 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4203 bp->num_queues + delta, bp->num_queues);
4204 }
4205
4206 return 0;
4207}
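
/* Illustrative sketch (not part of the driver): the shrink arithmetic used
 * above when only part of the ETH queues got memory. The CNIC (e.g. FCoE)
 * fastpath that sits right after the ETH queues is moved down by the same
 * delta so the fp array stays contiguous.
 */
static int example_new_fcoe_index(int wanted_eth, int allocated_eth,
				  int old_fcoe_idx)
{
	int delta = wanted_eth - allocated_eth;

	return old_fcoe_idx - delta;	/* target of bnx2x_move_fp() */
}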
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004208
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004209void bnx2x_free_mem_bp(struct bnx2x *bp)
4210{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004211 int i;
4212
4213 for (i = 0; i < bp->fp_array_size; i++)
4214 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004215 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004216 kfree(bp->sp_objs);
4217 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004218 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004219 kfree(bp->msix_table);
4220 kfree(bp->ilt);
4221}
4222
Bill Pemberton0329aba2012-12-03 09:24:24 -05004223int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004224{
4225 struct bnx2x_fastpath *fp;
4226 struct msix_entry *tbl;
4227 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004228 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004229 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004230 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004231
Ariel Elior6383c0b2011-07-14 08:31:57 +00004232 /*
4233	 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004234 * path IGU SBs plus default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004235 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004236 msix_table_size = bp->igu_sb_cnt;
4237 if (IS_PF(bp))
4238 msix_table_size++;
4239 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004240
4241 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004242 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004243 bp->fp_array_size = fp_array_size;
4244 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004245
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004246 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004247 if (!fp)
4248 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004249 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004250 fp[i].tpa_info =
4251 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4252 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4253 if (!(fp[i].tpa_info))
4254 goto alloc_err;
4255 }
4256
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004257 bp->fp = fp;
4258
Barak Witkowski15192a82012-06-19 07:48:28 +00004259 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004260 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004261 GFP_KERNEL);
4262 if (!bp->sp_objs)
4263 goto alloc_err;
4264
4265 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004266 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004267 GFP_KERNEL);
4268 if (!bp->fp_stats)
4269 goto alloc_err;
4270
Merav Sicron65565882012-06-19 07:48:26 +00004271 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004272 txq_array_size =
4273 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4274	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4275
4276 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4277 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004278 if (!bp->bnx2x_txq)
4279 goto alloc_err;
4280
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004281 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004282 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004283 if (!tbl)
4284 goto alloc_err;
4285 bp->msix_table = tbl;
4286
4287 /* ilt */
4288 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4289 if (!ilt)
4290 goto alloc_err;
4291 bp->ilt = ilt;
4292
4293 return 0;
4294alloc_err:
4295 bnx2x_free_mem_bp(bp);
4296 return -ENOMEM;
4298}
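
/* Illustrative sketch (not part of the driver): the sizing rules of
 * bnx2x_alloc_mem_bp() above, folded into one helper. All inputs are the
 * caller's numbers, standing in for bp->igu_sb_cnt, IS_PF(),
 * BNX2X_MAX_RSS_COUNT(), CNIC_SUPPORT() and BNX2X_MULTI_TX_COS.
 */
static void example_bp_array_sizes(int igu_sb_cnt, int is_pf, int max_rss,
				   int cnic_support, int multi_tx_cos,
				   int *msix_sz, int *fp_sz, int *txq_sz)
{
	*msix_sz = igu_sb_cnt + (is_pf ? 1 : 0); /* fastpath SBs + default */
	*fp_sz = max_rss + cnic_support;	 /* RSS + CNIC L2 queues */
	*txq_sz = max_rss * multi_tx_cos + cnic_support;
}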
4299
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004300int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004301{
4302 struct bnx2x *bp = netdev_priv(dev);
4303
4304 if (unlikely(!netif_running(dev)))
4305 return 0;
4306
Yuval Mintz5d07d862012-09-13 02:56:21 +00004307 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004308 return bnx2x_nic_load(bp, LOAD_NORMAL);
4309}
4310
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004311int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4312{
4313 u32 sel_phy_idx = 0;
4314 if (bp->link_params.num_phys <= 1)
4315 return INT_PHY;
4316
4317 if (bp->link_vars.link_up) {
4318 sel_phy_idx = EXT_PHY1;
4319 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4320 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4321 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4322 sel_phy_idx = EXT_PHY2;
4323 } else {
4325 switch (bnx2x_phy_selection(&bp->link_params)) {
4326 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4327 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4328 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4329 sel_phy_idx = EXT_PHY1;
4330 break;
4331 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4332 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4333 sel_phy_idx = EXT_PHY2;
4334 break;
4335 }
4336 }
4337
4338 return sel_phy_idx;
4339}
4340
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004341int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4342{
4343 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4344 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004345	 * The selected active PHY index always refers to the state after swapping
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004346	 * (in case PHY swapping is enabled), so when swapping is enabled we need
4347	 * to reverse the configuration
4348 */
4349
4350 if (bp->link_params.multi_phy_config &
4351 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4352 if (sel_phy_idx == EXT_PHY1)
4353 sel_phy_idx = EXT_PHY2;
4354 else if (sel_phy_idx == EXT_PHY2)
4355 sel_phy_idx = EXT_PHY1;
4356 }
4357 return LINK_CONFIG_IDX(sel_phy_idx);
4358}
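
/* Illustrative sketch (not part of the driver): the swap rule applied by
 * bnx2x_get_link_cfg_idx() above. The enum values are assumed stand-ins
 * for EXT_PHY1/EXT_PHY2; with swapping enabled the two external PHY
 * indices are simply exchanged, while the internal PHY is unaffected.
 */
static int example_swap_phy_idx(int sel_phy_idx, int swap_enabled)
{
	enum { EX_EXT_PHY1 = 1, EX_EXT_PHY2 = 2 };	/* assumed values */

	if (!swap_enabled)
		return sel_phy_idx;
	if (sel_phy_idx == EX_EXT_PHY1)
		return EX_EXT_PHY2;
	if (sel_phy_idx == EX_EXT_PHY2)
		return EX_EXT_PHY1;
	return sel_phy_idx;
}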
4359
Merav Sicron55c11942012-11-07 00:45:48 +00004360#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004361int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4362{
4363 struct bnx2x *bp = netdev_priv(dev);
4364 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4365
4366 switch (type) {
4367 case NETDEV_FCOE_WWNN:
4368 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4369 cp->fcoe_wwn_node_name_lo);
4370 break;
4371 case NETDEV_FCOE_WWPN:
4372 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4373 cp->fcoe_wwn_port_name_lo);
4374 break;
4375 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004376 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004377 return -EINVAL;
4378 }
4379
4380 return 0;
4381}
4382#endif
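
/* Illustrative sketch (not part of the driver): the HILO_U64() combination
 * used above. The FW hands the WWN over as two 32-bit halves, which are
 * glued into a single 64-bit value.
 */
static unsigned long long example_hilo_u64(unsigned int hi, unsigned int lo)
{
	return ((unsigned long long)hi << 32) | lo;
}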
4383
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004384/* called with rtnl_lock */
4385int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4386{
4387 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004388
4389 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004390 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004391 return -EAGAIN;
4392 }
4393
4394 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004395 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4396 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004397 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004398 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004399
4400 /* This does not race with packet allocation
4401 * because the actual alloc size is
4402 * only updated as part of load
4403 */
4404 dev->mtu = new_mtu;
4405
Michał Mirosław66371c42011-04-12 09:38:23 +00004406 return bnx2x_reload_if_running(dev);
4407}
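
/* Illustrative sketch (not part of the driver): the MTU window enforced by
 * bnx2x_change_mtu() above. The limits are assumed stand-ins for
 * ETH_MAX_JUMBO_PACKET_SIZE and ETH_MIN_PACKET_SIZE; the lower bound is
 * checked on the full frame, i.e. MTU plus the 14-byte Ethernet header.
 */
static int example_mtu_ok(int new_mtu)
{
	int eth_hlen = 14;
	int max_jumbo = 9600;	/* assumed ETH_MAX_JUMBO_PACKET_SIZE */
	int min_pkt = 60;	/* assumed ETH_MIN_PACKET_SIZE */

	return new_mtu <= max_jumbo && (new_mtu + eth_hlen) >= min_pkt;
}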
4408
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004409netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004410 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004411{
4412 struct bnx2x *bp = netdev_priv(dev);
4413
4414 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004415 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004416 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004417 features &= ~NETIF_F_GRO;
4418 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004419
4420 return features;
4421}
4422
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004423int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004424{
4425 struct bnx2x *bp = netdev_priv(dev);
4426 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004427 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00004428
4429 if (features & NETIF_F_LRO)
4430 flags |= TPA_ENABLE_FLAG;
4431 else
4432 flags &= ~TPA_ENABLE_FLAG;
4433
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004434 if (features & NETIF_F_GRO)
4435 flags |= GRO_ENABLE_FLAG;
4436 else
4437 flags &= ~GRO_ENABLE_FLAG;
4438
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004439 if (features & NETIF_F_LOOPBACK) {
4440 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4441 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4442 bnx2x_reload = true;
4443 }
4444 } else {
4445 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4446 bp->link_params.loopback_mode = LOOPBACK_NONE;
4447 bnx2x_reload = true;
4448 }
4449 }
4450
Michał Mirosław66371c42011-04-12 09:38:23 +00004451 if (flags ^ bp->flags) {
4452 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004453 bnx2x_reload = true;
4454 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004455
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004456 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004457 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4458 return bnx2x_reload_if_running(dev);
4459 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004460 }
4461
Michał Mirosław66371c42011-04-12 09:38:23 +00004462 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004463}
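
/* Illustrative sketch (not part of the driver): the feature dependency
 * enforced by bnx2x_fix_features() above, expressed over made-up flag bits
 * instead of netdev_features_t. TPA (LRO/GRO aggregation) needs Rx
 * checksum offload, so without RXCSUM both aggregation bits are dropped.
 */
enum {
	EX_F_RXCSUM = 1 << 0,	/* assumed stand-in feature bits */
	EX_F_LRO    = 1 << 1,
	EX_F_GRO    = 1 << 2,
};

static unsigned int example_fix_features(unsigned int features, int no_tpa)
{
	if (!(features & EX_F_RXCSUM) || no_tpa)
		features &= ~(EX_F_LRO | EX_F_GRO);
	return features;
}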
4464
4465void bnx2x_tx_timeout(struct net_device *dev)
4466{
4467 struct bnx2x *bp = netdev_priv(dev);
4468
4469#ifdef BNX2X_STOP_ON_ERROR
4470 if (!bp->panic)
4471 bnx2x_panic();
4472#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004473
4474 smp_mb__before_clear_bit();
4475 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4476 smp_mb__after_clear_bit();
4477
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004478 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00004479 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004480}
4481
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004482int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4483{
4484 struct net_device *dev = pci_get_drvdata(pdev);
4485 struct bnx2x *bp;
4486
4487 if (!dev) {
4488 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4489 return -ENODEV;
4490 }
4491 bp = netdev_priv(dev);
4492
4493 rtnl_lock();
4494
4495 pci_save_state(pdev);
4496
4497 if (!netif_running(dev)) {
4498 rtnl_unlock();
4499 return 0;
4500 }
4501
4502 netif_device_detach(dev);
4503
Yuval Mintz5d07d862012-09-13 02:56:21 +00004504 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004505
4506 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4507
4508 rtnl_unlock();
4509
4510 return 0;
4511}
4512
4513int bnx2x_resume(struct pci_dev *pdev)
4514{
4515 struct net_device *dev = pci_get_drvdata(pdev);
4516 struct bnx2x *bp;
4517 int rc;
4518
4519 if (!dev) {
4520 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4521 return -ENODEV;
4522 }
4523 bp = netdev_priv(dev);
4524
4525 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004526 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004527 return -EAGAIN;
4528 }
4529
4530 rtnl_lock();
4531
4532 pci_restore_state(pdev);
4533
4534 if (!netif_running(dev)) {
4535 rtnl_unlock();
4536 return 0;
4537 }
4538
4539 bnx2x_set_power_state(bp, PCI_D0);
4540 netif_device_attach(dev);
4541
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004542 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4543
4544 rtnl_unlock();
4545
4546 return rc;
4547}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004548
4550void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4551 u32 cid)
4552{
4553 /* ustorm cxt validation */
4554 cxt->ustorm_ag_context.cdu_usage =
4555 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4556 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4557 /* xcontext validation */
4558 cxt->xstorm_ag_context.cdu_reserved =
4559 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4560 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4561}
4562
Eric Dumazet1191cb82012-04-27 21:39:21 +00004563static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4564 u8 fw_sb_id, u8 sb_index,
4565 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004566{
4568 u32 addr = BAR_CSTRORM_INTMEM +
4569 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4570 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004571 DP(NETIF_MSG_IFUP,
4572 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4573 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004574}
4575
Eric Dumazet1191cb82012-04-27 21:39:21 +00004576static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4577 u16 fw_sb_id, u8 sb_index,
4578 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004579{
4580 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4581 u32 addr = BAR_CSTRORM_INTMEM +
4582 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4583 u16 flags = REG_RD16(bp, addr);
4584 /* clear and set */
4585 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4586 flags |= enable_flag;
4587 REG_WR16(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004588 DP(NETIF_MSG_IFUP,
4589 "port %x fw_sb_id %d sb_index %d disable %d\n",
4590 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004591}
4592
4593void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4594 u8 sb_index, u8 disable, u16 usec)
4595{
4596 int port = BP_PORT(bp);
4597 u8 ticks = usec / BNX2X_BTR;
4598
4599 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4600
4601 disable = disable ? 1 : (usec ? 0 : 1);
4602 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4603}
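
/* Illustrative sketch (not part of the driver): the coalescing arithmetic
 * of bnx2x_update_coalesce_sb_index() above. BNX2X_BTR is the HC timer
 * resolution (the value below is an assumed stand-in); note that a zero
 * timeout disables the index even when 'disable' is not set.
 */
static void example_coalesce(unsigned int usec, int disable,
			     unsigned char *ticks, int *disabled)
{
	unsigned int btr = 4;	/* assumed stand-in for BNX2X_BTR */

	*ticks = usec / btr;
	*disabled = disable ? 1 : (usec ? 0 : 1);
}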