/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
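
/* Illustrative txdata index arithmetic (hypothetical numbers): with
 * max_cos = 2 and BNX2X_NUM_ETH_QUEUES(bp) = 8, moving the FCoE fp by
 * delta = from - to = 3 takes its txdata index from
 * 8 * 2 + FCOE_TXQ_IDX_OFFSET down to (8 - 3) * 2 + FCOE_TXQ_IDX_OFFSET,
 * i.e. a drop of max_cos * delta = 6 slots, matching the comment inside
 * bnx2x_move_fp() above.
 */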

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
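
/* Illustrative BD chain for one transmitted packet (hypothetical layout):
 *
 *   start BD -> parse BD -> [TSO split header BD] -> data BD ... data BD
 *
 * nbd counts all BDs in the chain, but only the start BD and the data
 * BDs carry DMA mappings, which is why the parse BD (and, when present,
 * the TSO split header BD) is skipped in bnx2x_free_tx_pkt() before the
 * frag-unmapping loop.
 */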

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
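
/* Sketch of the SGE mask bookkeeping above (hypothetical numbers): with
 * 64-bit mask elements, SGE indices 0..63 live in sge_mask[0]. An
 * aggregation first clears the bits of the SGEs it consumed; the
 * producer loop then advances only over elements whose mask is fully
 * zero, re-arming each with BIT_VEC64_ELEM_ONE_MASK and growing
 * rx_sge_prod by BIT_VEC64_ELEM_SZ entries per element, and stops at
 * the first element that still has a bit set.
 */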

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 * nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
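
/* The 12 bytes break down as (RFC 1323 framing padded with NOPs):
 * nop (1) + nop (1) + kind (1) + length (1) + TSval (4) + TSecr (4) = 12
 */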

/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximates the MSS for this aggregation using the aggregation's
 * first packet.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
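
/* Worked example (illustrative numbers): for an IPv4 aggregation with
 * timestamps whose first packet has len_on_bd = 1514, the header share
 * is ETH_HLEN (14) + iphdr (20) + tcphdr (20) + TPA_TSTAMP_OPT_LEN (12)
 * = 66 bytes, so gso_size = 1514 - 66 = 1448.
 */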

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}
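
/* Note on the ~tcp_v{4,6}_check() calls above: tcp_gro_complete()
 * expects th->check to carry the complement of the TCP pseudo-header
 * checksum (addresses, protocol and length), so each helper seeds it
 * with a zero-base pseudo-header sum before GRO completion runs.
 */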

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
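
/* Possible outcomes of the validation above (illustrative summary):
 *   L4 csum not validated by HW           -> ip_summed stays CHECKSUM_NONE
 *   L4 validated, no error flags set      -> CHECKSUM_UNNECESSARY
 *   L4 validated, bad IP/L4 csum flagged  -> hw_csum_err++, stack re-verifies
 */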

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring,
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
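
/* Worked example (illustrative numbers): with line_speed = 10000 Mbps
 * and maxCfg = 30, SI mode reports 10000 * 30 / 100 = 3000 Mbps and SD
 * mode caps at vn_max_rate = 30 * 100 = 3000 Mbps; the two modes
 * diverge once the physical link runs below its nominal speed.
 */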

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
1232
Eric Dumazet1191cb82012-04-27 21:39:21 +00001233static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1234{
1235 int i;
1236
1237 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1238 struct eth_rx_sge *sge;
1239
1240 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1241 sge->addr_hi =
1242 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1244
1245 sge->addr_lo =
1246 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1247 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1248 }
1249}
1250
1251static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1252 struct bnx2x_fastpath *fp, int last)
1253{
1254 int i;
1255
1256 for (i = 0; i < last; i++) {
1257 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1258 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1259 u8 *data = first_buf->data;
1260
1261 if (data == NULL) {
1262 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1263 continue;
1264 }
1265 if (tpa_info->tpa_state == BNX2X_TPA_START)
1266 dma_unmap_single(&bp->pdev->dev,
1267 dma_unmap_addr(first_buf, mapping),
1268 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001269 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001270 first_buf->data = NULL;
1271 }
1272}
1273
Merav Sicron55c11942012-11-07 00:45:48 +00001274void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1275{
1276 int j;
1277
1278 for_each_rx_queue_cnic(bp, j) {
1279 struct bnx2x_fastpath *fp = &bp->fp[j];
1280
1281 fp->rx_bd_cons = 0;
1282
1283 /* Activate BD ring */
1284 /* Warning!
 1285	 * this will generate an interrupt (to the TSTORM) and
 1286	 * must only be done after the chip is initialized
1287 */
1288 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1289 fp->rx_sge_prod);
1290 }
1291}
1292
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001293void bnx2x_init_rx_rings(struct bnx2x *bp)
1294{
1295 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001296 u16 ring_prod;
1297 int i, j;
1298
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001299 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001300 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001301 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001302
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001303 DP(NETIF_MSG_IFUP,
1304 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1305
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001306 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001307			/* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001308 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001309 struct bnx2x_agg_info *tpa_info =
1310 &fp->tpa_info[i];
1311 struct sw_rx_bd *first_buf =
1312 &tpa_info->first_buf;
1313
Eric Dumazetd46d1322012-12-10 12:16:06 +00001314 first_buf->data = bnx2x_frag_alloc(fp);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001315 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001316 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1317 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001318 bnx2x_free_tpa_pool(bp, fp, i);
1319 fp->disable_tpa = 1;
1320 break;
1321 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001322 dma_unmap_addr_set(first_buf, mapping, 0);
1323 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001324 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001325
1326 /* "next page" elements initialization */
1327 bnx2x_set_next_page_sgl(fp);
1328
1329 /* set SGEs bit mask */
1330 bnx2x_init_sge_ring_bit_mask(fp);
1331
1332 /* Allocate SGEs and initialize the ring elements */
1333 for (i = 0, ring_prod = 0;
1334 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1335
1336 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001337 BNX2X_ERR("was only able to allocate %d rx sges\n",
1338 i);
1339 BNX2X_ERR("disabling TPA for queue[%d]\n",
1340 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001341 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001342 bnx2x_free_rx_sge_range(bp, fp,
1343 ring_prod);
1344 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001345 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001346 fp->disable_tpa = 1;
1347 ring_prod = 0;
1348 break;
1349 }
1350 ring_prod = NEXT_SGE_IDX(ring_prod);
1351 }
1352
1353 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001354 }
1355 }
1356
Merav Sicron55c11942012-11-07 00:45:48 +00001357 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001358 struct bnx2x_fastpath *fp = &bp->fp[j];
1359
1360 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001361
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001362 /* Activate BD ring */
1363 /* Warning!
 1364	 * this will generate an interrupt (to the TSTORM) and
 1365	 * must only be done after the chip is initialized
1366 */
1367 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1368 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001369
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001370 if (j != 0)
1371 continue;
1372
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001373 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001374 REG_WR(bp, BAR_USTRORM_INTMEM +
1375 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1376 U64_LO(fp->rx_comp_mapping));
1377 REG_WR(bp, BAR_USTRORM_INTMEM +
1378 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1379 U64_HI(fp->rx_comp_mapping));
1380 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001381 }
1382}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001383
Merav Sicron55c11942012-11-07 00:45:48 +00001384static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1385{
1386 u8 cos;
1387 struct bnx2x *bp = fp->bp;
1388
1389 for_each_cos_in_tx_queue(fp, cos) {
1390 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1391 unsigned pkts_compl = 0, bytes_compl = 0;
1392
1393 u16 sw_prod = txdata->tx_pkt_prod;
1394 u16 sw_cons = txdata->tx_pkt_cons;
1395
1396 while (sw_cons != sw_prod) {
1397 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1398 &pkts_compl, &bytes_compl);
1399 sw_cons++;
1400 }
1401
1402 netdev_tx_reset_queue(
1403 netdev_get_tx_queue(bp->dev,
1404 txdata->txq_index));
1405 }
1406}
1407
1408static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1409{
1410 int i;
1411
1412 for_each_tx_queue_cnic(bp, i) {
1413 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1414 }
1415}
1416
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001417static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1418{
1419 int i;
1420
Merav Sicron55c11942012-11-07 00:45:48 +00001421 for_each_eth_queue(bp, i) {
1422 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001423 }
1424}
1425
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001426static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1427{
1428 struct bnx2x *bp = fp->bp;
1429 int i;
1430
1431 /* ring wasn't allocated */
1432 if (fp->rx_buf_ring == NULL)
1433 return;
1434
1435 for (i = 0; i < NUM_RX_BD; i++) {
1436 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001437 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001438
Eric Dumazete52fcb22011-11-14 06:05:34 +00001439 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001440 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001441 dma_unmap_single(&bp->pdev->dev,
1442 dma_unmap_addr(rx_buf, mapping),
1443 fp->rx_buf_size, DMA_FROM_DEVICE);
1444
Eric Dumazete52fcb22011-11-14 06:05:34 +00001445 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001446 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001447 }
1448}
1449
Merav Sicron55c11942012-11-07 00:45:48 +00001450static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1451{
1452 int j;
1453
1454 for_each_rx_queue_cnic(bp, j) {
1455 bnx2x_free_rx_bds(&bp->fp[j]);
1456 }
1457}
1458
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001459static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1460{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001461 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001462
Merav Sicron55c11942012-11-07 00:45:48 +00001463 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001464 struct bnx2x_fastpath *fp = &bp->fp[j];
1465
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001466 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001467
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001468 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001469 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001470 }
1471}
1472
Merav Sicron55c11942012-11-07 00:45:48 +00001473void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1474{
1475 bnx2x_free_tx_skbs_cnic(bp);
1476 bnx2x_free_rx_skbs_cnic(bp);
1477}
1478
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001479void bnx2x_free_skbs(struct bnx2x *bp)
1480{
1481 bnx2x_free_tx_skbs(bp);
1482 bnx2x_free_rx_skbs(bp);
1483}
1484
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001485void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1486{
1487 /* load old values */
1488 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1489
1490 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1491 /* leave all but MAX value */
1492 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1493
1494 /* set new MAX value */
1495 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1496 & FUNC_MF_CFG_MAX_BW_MASK;
1497
1498 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1499 }
1500}
1501
Dmitry Kravkovca924292011-06-14 01:33:08 +00001502/**
1503 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1504 *
1505 * @bp: driver handle
1506 * @nvecs: number of vectors to be released
1507 */
1508static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001509{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001510 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001511
Dmitry Kravkovca924292011-06-14 01:33:08 +00001512 if (nvecs == offset)
1513 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001514
1515 /* VFs don't have a default SB */
1516 if (IS_PF(bp)) {
1517 free_irq(bp->msix_table[offset].vector, bp->dev);
1518 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1519 bp->msix_table[offset].vector);
1520 offset++;
1521 }
Merav Sicron55c11942012-11-07 00:45:48 +00001522
1523 if (CNIC_SUPPORT(bp)) {
1524 if (nvecs == offset)
1525 return;
1526 offset++;
1527 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001528
Dmitry Kravkovca924292011-06-14 01:33:08 +00001529 for_each_eth_queue(bp, i) {
1530 if (nvecs == offset)
1531 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001532 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1533 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001534
1535 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001536 }
1537}
1538
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001539void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001540{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001541 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001542 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1543 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1544
1545 /* vfs don't have a default status block */
1546 if (IS_PF(bp))
1547 nvecs++;
1548
1549 bnx2x_free_msix_irqs(bp, nvecs);
1550 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001551 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001552 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001553}
1554
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001555int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001556{
Ariel Elior1ab44342013-01-01 05:22:23 +00001557 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001558
Ariel Elior1ab44342013-01-01 05:22:23 +00001559 /* VFs don't have a default status block */
1560 if (IS_PF(bp)) {
1561 bp->msix_table[msix_vec].entry = msix_vec;
1562 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1563 bp->msix_table[0].entry);
1564 msix_vec++;
1565 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001566
Merav Sicron55c11942012-11-07 00:45:48 +00001567 /* Cnic requires an msix vector for itself */
1568 if (CNIC_SUPPORT(bp)) {
1569 bp->msix_table[msix_vec].entry = msix_vec;
1570 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1571 msix_vec, bp->msix_table[msix_vec].entry);
1572 msix_vec++;
1573 }
1574
Ariel Elior6383c0b2011-07-14 08:31:57 +00001575 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001576 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001577 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001578 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1579 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001580 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001581 }
1582
Ariel Elior1ab44342013-01-01 05:22:23 +00001583 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1584 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001585
Ariel Elior1ab44342013-01-01 05:22:23 +00001586 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001587
1588 /*
1589 * reconfigure number of tx/rx queues according to available
1590 * MSI-X vectors
1591 */
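	/* Added note: pci_enable_msix(), as it behaved when this driver was
	 * written, returns 0 on success, a negative errno on error, or a
	 * positive count of the vectors actually available when fewer than
	 * requested could be allocated -- the rc checks below rely on
	 * exactly this convention.
	 */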
Merav Sicron55c11942012-11-07 00:45:48 +00001592 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001593		/* how many fewer vectors will we have? */
Ariel Elior1ab44342013-01-01 05:22:23 +00001594 int diff = msix_vec - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001595
Merav Sicron51c1a582012-03-18 10:33:38 +00001596 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001597
1598 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1599
1600 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001601 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1602 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001603 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001604 /*
 1605		 * decrease the number of queues by the number of unallocated entries
1606 */
Merav Sicron55c11942012-11-07 00:45:48 +00001607 bp->num_ethernet_queues -= diff;
1608 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001609
Merav Sicron51c1a582012-03-18 10:33:38 +00001610 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001611 bp->num_queues);
1612 } else if (rc > 0) {
1613 /* Get by with single vector */
1614 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1615 if (rc) {
1616 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1617 rc);
1618 goto no_msix;
1619 }
1620
1621 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1622 bp->flags |= USING_SINGLE_MSIX_FLAG;
1623
Merav Sicron55c11942012-11-07 00:45:48 +00001624 BNX2X_DEV_INFO("set number of queues to 1\n");
1625 bp->num_ethernet_queues = 1;
1626 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001627 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001628 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001629 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001630 }
1631
1632 bp->flags |= USING_MSIX_FLAG;
1633
1634 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001635
1636no_msix:
 1637	/* fall back to INTx if there is not enough memory */
1638 if (rc == -ENOMEM)
1639 bp->flags |= DISABLE_MSI_FLAG;
1640
1641 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001642}
1643
1644static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1645{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001646 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001647
Ariel Eliorad5afc82013-01-01 05:22:26 +00001648 /* no default status block for vf */
1649 if (IS_PF(bp)) {
1650 rc = request_irq(bp->msix_table[offset++].vector,
1651 bnx2x_msix_sp_int, 0,
1652 bp->dev->name, bp->dev);
1653 if (rc) {
1654 BNX2X_ERR("request sp irq failed\n");
1655 return -EBUSY;
1656 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001657 }
1658
Merav Sicron55c11942012-11-07 00:45:48 +00001659 if (CNIC_SUPPORT(bp))
1660 offset++;
1661
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001662 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001663 struct bnx2x_fastpath *fp = &bp->fp[i];
1664 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1665 bp->dev->name, i);
1666
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001667 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001668 bnx2x_msix_fp_int, 0, fp->name, fp);
1669 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001670 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1671 bp->msix_table[offset].vector, rc);
1672 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001673 return -EBUSY;
1674 }
1675
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001676 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001677 }
1678
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001679 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001680 if (IS_PF(bp)) {
1681 offset = 1 + CNIC_SUPPORT(bp);
1682 netdev_info(bp->dev,
1683 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1684 bp->msix_table[0].vector,
1685 0, bp->msix_table[offset].vector,
1686 i - 1, bp->msix_table[offset + i - 1].vector);
1687 } else {
1688 offset = CNIC_SUPPORT(bp);
1689 netdev_info(bp->dev,
1690 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1691 0, bp->msix_table[offset].vector,
1692 i - 1, bp->msix_table[offset + i - 1].vector);
1693 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001694 return 0;
1695}
1696
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001697int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001698{
1699 int rc;
1700
1701 rc = pci_enable_msi(bp->pdev);
1702 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001703 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001704 return -1;
1705 }
1706 bp->flags |= USING_MSI_FLAG;
1707
1708 return 0;
1709}
1710
1711static int bnx2x_req_irq(struct bnx2x *bp)
1712{
1713 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001714 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001715
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001716 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001717 flags = 0;
1718 else
1719 flags = IRQF_SHARED;
1720
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001721 if (bp->flags & USING_MSIX_FLAG)
1722 irq = bp->msix_table[0].vector;
1723 else
1724 irq = bp->pdev->irq;
1725
1726 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001727}
1728
Yuval Mintzecf01c22013-04-22 02:53:03 +00001729int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001730{
1731 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001732 if (bp->flags & USING_MSIX_FLAG &&
1733 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001734 rc = bnx2x_req_msix_irqs(bp);
1735 if (rc)
1736 return rc;
1737 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001738 rc = bnx2x_req_irq(bp);
1739 if (rc) {
1740 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1741 return rc;
1742 }
1743 if (bp->flags & USING_MSI_FLAG) {
1744 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001745 netdev_info(bp->dev, "using MSI IRQ %d\n",
1746 bp->dev->irq);
1747 }
1748 if (bp->flags & USING_MSIX_FLAG) {
1749 bp->dev->irq = bp->msix_table[0].vector;
1750 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1751 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001752 }
1753 }
1754
1755 return 0;
1756}
1757
Merav Sicron55c11942012-11-07 00:45:48 +00001758static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1759{
1760 int i;
1761
1762 for_each_rx_queue_cnic(bp, i)
1763 napi_enable(&bnx2x_fp(bp, i, napi));
1764}
1765
Eric Dumazet1191cb82012-04-27 21:39:21 +00001766static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001767{
1768 int i;
1769
Merav Sicron55c11942012-11-07 00:45:48 +00001770 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001771 napi_enable(&bnx2x_fp(bp, i, napi));
1772}
1773
Merav Sicron55c11942012-11-07 00:45:48 +00001774static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1775{
1776 int i;
1777
1778 for_each_rx_queue_cnic(bp, i)
1779 napi_disable(&bnx2x_fp(bp, i, napi));
1780}
1781
Eric Dumazet1191cb82012-04-27 21:39:21 +00001782static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001783{
1784 int i;
1785
Merav Sicron55c11942012-11-07 00:45:48 +00001786 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001787 napi_disable(&bnx2x_fp(bp, i, napi));
1788}
1789
1790void bnx2x_netif_start(struct bnx2x *bp)
1791{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001792 if (netif_running(bp->dev)) {
1793 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001794 if (CNIC_LOADED(bp))
1795 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001796 bnx2x_int_enable(bp);
1797 if (bp->state == BNX2X_STATE_OPEN)
1798 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001799 }
1800}
1801
1802void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1803{
1804 bnx2x_int_disable_sync(bp, disable_hw);
1805 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001806 if (CNIC_LOADED(bp))
1807 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001808}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001809
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001810u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1811{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001812 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001813
Merav Sicron55c11942012-11-07 00:45:48 +00001814 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001815 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1816 u16 ether_type = ntohs(hdr->h_proto);
1817
1818 /* Skip VLAN tag if present */
1819 if (ether_type == ETH_P_8021Q) {
1820 struct vlan_ethhdr *vhdr =
1821 (struct vlan_ethhdr *)skb->data;
1822
1823 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1824 }
1825
1826 /* If ethertype is FCoE or FIP - use FCoE ring */
1827 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001828 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001829 }
Merav Sicron55c11942012-11-07 00:45:48 +00001830
David S. Miller823dcd22011-08-20 10:39:12 -07001831 /* select a non-FCoE queue */
Eric Dumazetada7c192013-05-31 14:32:55 +00001832 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001833}
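/* Added note: taking __netdev_pick_tx() modulo the number of ETH queues
 * keeps ordinary traffic off the FCoE ring, whose Tx index comes right
 * after the ETH L2 indices (see the comment before
 * bnx2x_set_real_num_queues() below).
 */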
1834
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001835void bnx2x_set_num_queues(struct bnx2x *bp)
1836{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001837 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001838 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001839
Barak Witkowskia3348722012-04-23 03:04:46 +00001840 /* override in STORAGE SD modes */
1841 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001842 bp->num_ethernet_queues = 1;
1843
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001844 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001845 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1846 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001847
1848 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001849}
1850
David S. Miller823dcd22011-08-20 10:39:12 -07001851/**
1852 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1853 *
1854 * @bp: Driver handle
1855 *
 1856 * We currently support at most 16 Tx queues for each CoS, thus we will
1857 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1858 * bp->max_cos.
1859 *
1860 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1861 * index after all ETH L2 indices.
1862 *
1863 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1864 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1865 * 16..31,...) with indices that are not coupled with any real Tx queue.
1866 *
1867 * The proper configuration of skb->queue_mapping is handled by
1868 * bnx2x_select_queue() and __skb_tx_hash().
1869 *
1870 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1871 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1872 */
Merav Sicron55c11942012-11-07 00:45:48 +00001873static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001874{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001875 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001876
Merav Sicron65565882012-06-19 07:48:26 +00001877 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001878 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001879
 1880	/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001881 if (include_cnic && !NO_FCOE(bp)) {
1882 rx++;
1883 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001884 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001885
1886 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1887 if (rc) {
1888 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1889 return rc;
1890 }
1891 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1892 if (rc) {
1893 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1894 return rc;
1895 }
1896
Merav Sicron51c1a582012-03-18 10:33:38 +00001897 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001898 tx, rx);
1899
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001900 return rc;
1901}
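/* Worked example (hypothetical numbers, added for illustration): with
 * 8 ETH queues and bp->max_cos == 3, the stack is told about
 * tx = 8 * 3 = 24 real Tx queues and rx = 8 Rx queues; when the CNIC FCoE
 * queue is included, both counts grow by one, so the FCoE L2 ring gets the
 * index right after the ETH rings, as the comment above describes.
 */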
1902
Eric Dumazet1191cb82012-04-27 21:39:21 +00001903static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001904{
1905 int i;
1906
1907 for_each_queue(bp, i) {
1908 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001909 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001910
1911 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1912 if (IS_FCOE_IDX(i))
1913 /*
 1914			 * Although no IP frames are expected to arrive on
 1915			 * this ring, we still want to add an
1916 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1917 * overrun attack.
1918 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001919 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001920 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001921 mtu = bp->dev->mtu;
1922 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1923 IP_HEADER_ALIGNMENT_PADDING +
1924 ETH_OVREHEAD +
1925 mtu +
1926 BNX2X_FW_RX_ALIGN_END;
 1927		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
Eric Dumazetd46d1322012-12-10 12:16:06 +00001928 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1929 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1930 else
1931 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001932 }
1933}
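/* Added note: a non-zero rx_frag_size means the receive buffer plus
 * NET_SKB_PAD fits in a single page, which presumably lets
 * bnx2x_frag_alloc() use the page-fragment allocator; rx_frag_size == 0
 * signals the fallback allocation path instead.
 */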
1934
Eric Dumazet1191cb82012-04-27 21:39:21 +00001935static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001936{
1937 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001938 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1939
Dmitry Kravkov96305232012-04-03 18:41:30 +00001940	/* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001941 * enabled
1942 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001943 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1944 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001945 bp->fp->cl_id +
1946 ethtool_rxfh_indir_default(i, num_eth_queues);
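	/* Added example: ethtool_rxfh_indir_default(i, n) is simply i % n,
	 * so with e.g. four ETH queues the table cycles through
	 * cl_id, cl_id + 1, cl_id + 2, cl_id + 3, cl_id, ...
	 */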
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001947
1948 /*
1949 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1950	 * per-port, so if explicit configuration is needed, do it only
1951 * for a PMF.
1952 *
1953 * For 57712 and newer on the other hand it's a per-function
1954 * configuration.
1955 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001956 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001957}
1958
Dmitry Kravkov96305232012-04-03 18:41:30 +00001959int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001960 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001961{
Yuval Mintz3b603062012-03-18 10:33:39 +00001962 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001963
1964 /* Although RSS is meaningless when there is a single HW queue we
1965 * still need it enabled in order to have HW Rx hash generated.
1966 *
1967 * if (!is_eth_multi(bp))
1968 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1969 */
1970
Dmitry Kravkov96305232012-04-03 18:41:30 +00001971 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001972
1973 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1974
Dmitry Kravkov96305232012-04-03 18:41:30 +00001975 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001976
Dmitry Kravkov96305232012-04-03 18:41:30 +00001977 /* RSS configuration */
1978 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1979 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1980 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1981 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001982 if (rss_obj->udp_rss_v4)
1983 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1984 if (rss_obj->udp_rss_v6)
1985 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001986
Dmitry Kravkov96305232012-04-03 18:41:30 +00001987 /* Hash bits */
1988 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001989
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001990 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001991
Dmitry Kravkov96305232012-04-03 18:41:30 +00001992 if (config_hash) {
1993 /* RSS keys */
Akinobu Mita8376d0b2012-12-17 16:04:28 -08001994 prandom_bytes(params.rss_key, sizeof(params.rss_key));
Dmitry Kravkov96305232012-04-03 18:41:30 +00001995 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001996 }
1997
1998 return bnx2x_config_rss(bp, &params);
1999}
2000
Eric Dumazet1191cb82012-04-27 21:39:21 +00002001static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002002{
Yuval Mintz3b603062012-03-18 10:33:39 +00002003 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002004
2005 /* Prepare parameters for function state transitions */
2006 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2007
2008 func_params.f_obj = &bp->func_obj;
2009 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2010
2011 func_params.params.hw_init.load_phase = load_code;
2012
2013 return bnx2x_func_state_change(bp, &func_params);
2014}
2015
2016/*
 2017 * Cleans the objects that have internal lists without sending
 2018 * ramrods. Should be run when interrupts are disabled.
2019 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002020void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002021{
2022 int rc;
2023 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002024 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002025 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002026
2027 /***************** Cleanup MACs' object first *************************/
2028
 2029	/* Wait for completion of the requested commands */
2030 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2031 /* Perform a dry cleanup */
2032 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2033
2034 /* Clean ETH primary MAC */
2035 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002036 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002037 &ramrod_flags);
2038 if (rc != 0)
2039 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2040
2041 /* Cleanup UC list */
2042 vlan_mac_flags = 0;
2043 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2044 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2045 &ramrod_flags);
2046 if (rc != 0)
2047 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2048
2049 /***************** Now clean mcast object *****************************/
2050 rparam.mcast_obj = &bp->mcast_obj;
2051 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2052
2053 /* Add a DEL command... */
2054 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2055 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002056 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2057 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002058
2059 /* ...and wait until all pending commands are cleared */
2060 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2061 while (rc != 0) {
2062 if (rc < 0) {
2063 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2064 rc);
2065 return;
2066 }
2067
2068 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2069 }
2070}
2071
2072#ifndef BNX2X_STOP_ON_ERROR
2073#define LOAD_ERROR_EXIT(bp, label) \
2074 do { \
2075 (bp)->state = BNX2X_STATE_ERROR; \
2076 goto label; \
2077 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002078
2079#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2080 do { \
2081 bp->cnic_loaded = false; \
2082 goto label; \
2083 } while (0)
2084#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002085#define LOAD_ERROR_EXIT(bp, label) \
2086 do { \
2087 (bp)->state = BNX2X_STATE_ERROR; \
2088 (bp)->panic = 1; \
2089 return -EBUSY; \
2090 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002091#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2092 do { \
2093 bp->cnic_loaded = false; \
2094 (bp)->panic = 1; \
2095 return -EBUSY; \
2096 } while (0)
2097#endif /*BNX2X_STOP_ON_ERROR*/
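/* Added note on the macros above: the do { ... } while (0) wrapper makes
 * each macro expand to a single statement, so it remains correct even in an
 * unbraced branch, e.g.:
 *
 *	if (rc)
 *		LOAD_ERROR_EXIT(bp, load_error2);
 *	else
 *		...
 */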
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002098
Ariel Eliorad5afc82013-01-01 05:22:26 +00002099static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002100{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002101 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2102 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2103 return;
2104}
Yuval Mintz452427b2012-03-26 20:47:07 +00002105
Ariel Eliorad5afc82013-01-01 05:22:26 +00002106static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2107{
Ariel Elior8db573b2013-01-01 05:22:37 +00002108 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002109 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002110
Ariel Eliorad5afc82013-01-01 05:22:26 +00002111 /* number of queues for statistics is number of eth queues + FCoE */
2112 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002113
Ariel Eliorad5afc82013-01-01 05:22:26 +00002114 /* Total number of FW statistics requests =
 2115	 * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2116 * and fcoe l2 queue) stats + num of queues (which includes another 1
2117 * for fcoe l2 queue if applicable)
2118 */
2119 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2120
Ariel Elior8db573b2013-01-01 05:22:37 +00002121 /* vf stats appear in the request list, but their data is allocated by
2122 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2123 * it is used to determine where to place the vf stats queries in the
2124 * request struct
2125 */
2126 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002127 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002128
Ariel Eliorad5afc82013-01-01 05:22:26 +00002129 /* Request is built from stats_query_header and an array of
2130 * stats_query_cmd_group each of which contains
 2131	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2132 * configured in the stats_query_header.
2133 */
2134 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002135 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2136 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002137 1 : 0));
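	/* Added note: the expression above is a plain ceiling division,
	 * equivalent to DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
	 * STATS_QUERY_CMD_COUNT). Hypothetical example: 18 requests with a
	 * group size of 16 yield 18/16 + 1 = 2 groups.
	 */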
2138
Ariel Elior8db573b2013-01-01 05:22:37 +00002139 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2140 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002141 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2142 num_groups * sizeof(struct stats_query_cmd_group);
2143
2144 /* Data for statistics requests + stats_counter
2145 * stats_counter holds per-STORM counters that are incremented
2146 * when STORM has finished with the current request.
 2147	 * memory for FCoE offloaded statistics is counted anyway,
2148 * even if they will not be sent.
2149 * VF stats are not accounted for here as the data of VF stats is stored
2150 * in memory allocated by the VF, not here.
2151 */
2152 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2153 sizeof(struct per_pf_stats) +
2154 sizeof(struct fcoe_statistics_params) +
2155 sizeof(struct per_queue_stats) * num_queue_stats +
2156 sizeof(struct stats_counter);
2157
2158 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2159 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2160
2161 /* Set shortcuts */
2162 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2163 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2164 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2165 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2166 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2167 bp->fw_stats_req_sz;
2168
2169 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2170 U64_HI(bp->fw_stats_req_mapping),
2171 U64_LO(bp->fw_stats_req_mapping));
2172 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2173 U64_HI(bp->fw_stats_data_mapping),
2174 U64_LO(bp->fw_stats_data_mapping));
2175 return 0;
2176
2177alloc_mem_err:
2178 bnx2x_free_fw_stats_mem(bp);
2179 BNX2X_ERR("Can't allocate FW stats memory\n");
2180 return -ENOMEM;
2181}
2182
2183/* send load request to mcp and analyze response */
2184static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2185{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002186 u32 param;
2187
Ariel Eliorad5afc82013-01-01 05:22:26 +00002188 /* init fw_seq */
2189 bp->fw_seq =
2190 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2191 DRV_MSG_SEQ_NUMBER_MASK);
2192 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2193
2194 /* Get current FW pulse sequence */
2195 bp->fw_drv_pulse_wr_seq =
2196 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2197 DRV_PULSE_SEQ_MASK);
2198 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2199
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002200 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2201
2202 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2203 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2204
Ariel Eliorad5afc82013-01-01 05:22:26 +00002205 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002206 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002207
2208 /* if mcp fails to respond we must abort */
2209 if (!(*load_code)) {
2210 BNX2X_ERR("MCP response failure, aborting\n");
2211 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002212 }
2213
Ariel Eliorad5afc82013-01-01 05:22:26 +00002214 /* If mcp refused (e.g. other port is in diagnostic mode) we
2215 * must abort
2216 */
2217 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2218 BNX2X_ERR("MCP refused load request, aborting\n");
2219 return -EBUSY;
2220 }
2221 return 0;
2222}
2223
2224/* check whether another PF has already loaded FW to chip. In
2225 * virtualized environments a pf from another VM may have already
2226 * initialized the device including loading FW
2227 */
2228int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2229{
2230 /* is another pf loaded on this engine? */
2231 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2232 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2233 /* build my FW version dword */
2234 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2235 (BCM_5710_FW_MINOR_VERSION << 8) +
2236 (BCM_5710_FW_REVISION_VERSION << 16) +
2237 (BCM_5710_FW_ENGINEERING_VERSION << 24);
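		/* Added example: a hypothetical 7.8.19.0 firmware packs to
		 * 0x00130807 -- major in bits 0-7, minor in bits 8-15,
		 * revision in bits 16-23, engineering version in bits 24-31.
		 */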
2238
2239 /* read loaded FW from chip */
2240 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2241
2242 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2243 loaded_fw, my_fw);
2244
2245 /* abort nic load if version mismatch */
2246 if (my_fw != loaded_fw) {
2247 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2248 loaded_fw, my_fw);
2249 return -EBUSY;
2250 }
2251 }
2252 return 0;
2253}
2254
2255/* returns the "mcp load_code" according to global load_count array */
2256static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2257{
2258 int path = BP_PATH(bp);
2259
2260 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2261 path, load_count[path][0], load_count[path][1],
2262 load_count[path][2]);
2263 load_count[path][0]++;
2264 load_count[path][1 + port]++;
2265 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2266 path, load_count[path][0], load_count[path][1],
2267 load_count[path][2]);
2268 if (load_count[path][0] == 1)
2269 return FW_MSG_CODE_DRV_LOAD_COMMON;
2270 else if (load_count[path][1 + port] == 1)
2271 return FW_MSG_CODE_DRV_LOAD_PORT;
2272 else
2273 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2274}
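/* Worked example (hypothetical): the first function loaded on a path bumps
 * load_count[path] to {1, 1, 0} and gets LOAD_COMMON; the first function on
 * the other port then sees {2, 1, 1} and gets LOAD_PORT; every later
 * function on either port gets LOAD_FUNCTION.
 */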
2275
2276/* mark PMF if applicable */
2277static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2278{
2279 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2280 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2281 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2282 bp->port.pmf = 1;
2283 /* We need the barrier to ensure the ordering between the
2284 * writing to bp->port.pmf here and reading it from the
2285 * bnx2x_periodic_task().
2286 */
2287 smp_mb();
2288 } else {
2289 bp->port.pmf = 0;
2290 }
2291
2292 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2293}
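/* Added sketch of the ordering the smp_mb() above provides (illustrative
 * only; the reader's exact code in bnx2x_periodic_task() is not shown here):
 *
 *	writer (this function)		reader (periodic task)
 *	bp->port.pmf = 1;		smp_mb();
 *	smp_mb();			if (bp->port.pmf)
 *						... act as PMF ...
 */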
2294
2295static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2296{
2297 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2298 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2299 (bp->common.shmem2_base)) {
2300 if (SHMEM2_HAS(bp, dcc_support))
2301 SHMEM2_WR(bp, dcc_support,
2302 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2303 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2304 if (SHMEM2_HAS(bp, afex_driver_support))
2305 SHMEM2_WR(bp, afex_driver_support,
2306 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2307 }
2308
2309 /* Set AFEX default VLAN tag to an invalid value */
2310 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002311}
2312
Eric Dumazet1191cb82012-04-27 21:39:21 +00002313/**
2314 * bnx2x_bz_fp - zero content of the fastpath structure.
2315 *
2316 * @bp: driver handle
2317 * @index: fastpath index to be zeroed
2318 *
2319 * Makes sure the contents of the bp->fp[index].napi is kept
2320 * intact.
2321 */
2322static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2323{
2324 struct bnx2x_fastpath *fp = &bp->fp[index];
Barak Witkowski15192a82012-06-19 07:48:28 +00002325
Merav Sicron65565882012-06-19 07:48:26 +00002326 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002327 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002328 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002329 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002330 if (fp->tpa_info)
2331 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2332 sizeof(struct bnx2x_agg_info));
2333 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002334
2335 /* Restore the NAPI object as it has been already initialized */
2336 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002337 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002338 fp->bp = bp;
2339 fp->index = index;
2340 if (IS_ETH_FP(fp))
2341 fp->max_cos = bp->max_cos;
2342 else
2343 /* Special queues support only one CoS */
2344 fp->max_cos = 1;
2345
Merav Sicron65565882012-06-19 07:48:26 +00002346 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002347 if (IS_FCOE_FP(fp))
2348 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002349 if (IS_ETH_FP(fp))
2350 for_each_cos_in_tx_queue(fp, cos)
2351 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2352 BNX2X_NUM_ETH_QUEUES(bp) + index];
2353
Eric Dumazet1191cb82012-04-27 21:39:21 +00002354 /*
 2355	 * set the TPA flag for each queue. The TPA flag determines the queue's
 2356	 * minimal size, so it must be set prior to queue memory allocation
2357 */
2358 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2359 (bp->flags & GRO_ENABLE_FLAG &&
2360 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2361 if (bp->flags & TPA_ENABLE_FLAG)
2362 fp->mode = TPA_MODE_LRO;
2363 else if (bp->flags & GRO_ENABLE_FLAG)
2364 fp->mode = TPA_MODE_GRO;
2365
Eric Dumazet1191cb82012-04-27 21:39:21 +00002366 /* We don't want TPA on an FCoE L2 ring */
2367 if (IS_FCOE_FP(fp))
2368 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002369}
2370
2371int bnx2x_load_cnic(struct bnx2x *bp)
2372{
2373 int i, rc, port = BP_PORT(bp);
2374
2375 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2376
2377 mutex_init(&bp->cnic_mutex);
2378
Ariel Eliorad5afc82013-01-01 05:22:26 +00002379 if (IS_PF(bp)) {
2380 rc = bnx2x_alloc_mem_cnic(bp);
2381 if (rc) {
2382 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2383 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2384 }
Merav Sicron55c11942012-11-07 00:45:48 +00002385 }
2386
2387 rc = bnx2x_alloc_fp_mem_cnic(bp);
2388 if (rc) {
2389 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2390 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2391 }
2392
2393 /* Update the number of queues with the cnic queues */
2394 rc = bnx2x_set_real_num_queues(bp, 1);
2395 if (rc) {
2396 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2397 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2398 }
2399
2400 /* Add all CNIC NAPI objects */
2401 bnx2x_add_all_napi_cnic(bp);
2402 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2403 bnx2x_napi_enable_cnic(bp);
2404
2405 rc = bnx2x_init_hw_func_cnic(bp);
2406 if (rc)
2407 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2408
2409 bnx2x_nic_init_cnic(bp);
2410
Ariel Eliorad5afc82013-01-01 05:22:26 +00002411 if (IS_PF(bp)) {
2412 /* Enable Timer scan */
2413 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002414
Ariel Eliorad5afc82013-01-01 05:22:26 +00002415 /* setup cnic queues */
2416 for_each_cnic_queue(bp, i) {
2417 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2418 if (rc) {
2419 BNX2X_ERR("Queue setup failed\n");
2420 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2421 }
Merav Sicron55c11942012-11-07 00:45:48 +00002422 }
2423 }
2424
2425 /* Initialize Rx filter. */
2426 netif_addr_lock_bh(bp->dev);
2427 bnx2x_set_rx_mode(bp->dev);
2428 netif_addr_unlock_bh(bp->dev);
2429
2430 /* re-read iscsi info */
2431 bnx2x_get_iscsi_info(bp);
2432 bnx2x_setup_cnic_irq_info(bp);
2433 bnx2x_setup_cnic_info(bp);
2434 bp->cnic_loaded = true;
2435 if (bp->state == BNX2X_STATE_OPEN)
2436 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2437
2438
 2439	DP(NETIF_MSG_IFUP, "Ending CNIC-related load successfully\n");
2440
2441 return 0;
2442
2443#ifndef BNX2X_STOP_ON_ERROR
2444load_error_cnic2:
2445 /* Disable Timer scan */
2446 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2447
2448load_error_cnic1:
2449 bnx2x_napi_disable_cnic(bp);
2450 /* Update the number of queues without the cnic queues */
2451 rc = bnx2x_set_real_num_queues(bp, 0);
2452 if (rc)
2453 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2454load_error_cnic0:
2455 BNX2X_ERR("CNIC-related load failed\n");
2456 bnx2x_free_fp_mem_cnic(bp);
2457 bnx2x_free_mem_cnic(bp);
2458 return rc;
2459#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002460}
2461
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002462/* must be called with rtnl_lock */
2463int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2464{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002465 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002466 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002467
Merav Sicron55c11942012-11-07 00:45:48 +00002468 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2469 DP(NETIF_MSG_IFUP,
2470 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2471
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002472#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002473 if (unlikely(bp->panic)) {
2474 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002475 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002476 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002477#endif
2478
2479 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2480
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002481 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2482 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2483 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002484
Ariel Eliorad5afc82013-01-01 05:22:26 +00002485 if (IS_PF(bp))
2486 /* must be called before memory allocation and HW init */
2487 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002488
Ariel Elior6383c0b2011-07-14 08:31:57 +00002489 /*
 2490	 * Zero fastpath structures, preserving invariants: napi (which is
 2491	 * allocated only once), fp index, max_cos and the bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002492 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002493 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002494 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002495 for_each_queue(bp, i)
2496 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002497 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2498 bp->num_cnic_queues) *
2499 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002500
Merav Sicron55c11942012-11-07 00:45:48 +00002501 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002502
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002503 /* Set the receive queues buffer size */
2504 bnx2x_set_rx_buf_size(bp);
2505
Ariel Eliorad5afc82013-01-01 05:22:26 +00002506 if (IS_PF(bp)) {
2507 rc = bnx2x_alloc_mem(bp);
2508 if (rc) {
2509 BNX2X_ERR("Unable to allocate bp memory\n");
2510 return rc;
2511 }
2512 }
2513
 2514	/* Allocate memory for FW statistics */
2515 if (bnx2x_alloc_fw_stats_mem(bp))
2516 LOAD_ERROR_EXIT(bp, load_error0);
2517
 2518	/* needs to be done after alloc mem, since it's self-adjusting to the amount
2519 * of memory available for RSS queues
2520 */
2521 rc = bnx2x_alloc_fp_mem(bp);
2522 if (rc) {
2523 BNX2X_ERR("Unable to allocate memory for fps\n");
2524 LOAD_ERROR_EXIT(bp, load_error0);
2525 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002526
Ariel Elior8d9ac292013-01-01 05:22:27 +00002527 /* request pf to initialize status blocks */
2528 if (IS_VF(bp)) {
2529 rc = bnx2x_vfpf_init(bp);
2530 if (rc)
2531 LOAD_ERROR_EXIT(bp, load_error0);
2532 }
2533
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002534	/* Since bnx2x_alloc_mem() may update
2535 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002536 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002537 */
Merav Sicron55c11942012-11-07 00:45:48 +00002538 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002539 if (rc) {
2540 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002541 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002542 }
2543
Ariel Elior6383c0b2011-07-14 08:31:57 +00002544 /* configure multi cos mappings in kernel.
 2545	 * this configuration may be overridden by a multi class queue discipline
2546 * or by a dcbx negotiation result.
2547 */
2548 bnx2x_setup_tc(bp->dev, bp->max_cos);
2549
Merav Sicron26614ba2012-08-27 03:26:19 +00002550 /* Add all NAPI objects */
2551 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002552 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002553 bnx2x_napi_enable(bp);
2554
Ariel Eliorad5afc82013-01-01 05:22:26 +00002555 if (IS_PF(bp)) {
2556 /* set pf load just before approaching the MCP */
2557 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002558
Ariel Eliorad5afc82013-01-01 05:22:26 +00002559 /* if mcp exists send load request and analyze response */
2560 if (!BP_NOMCP(bp)) {
2561 /* attempt to load pf */
2562 rc = bnx2x_nic_load_request(bp, &load_code);
2563 if (rc)
2564 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002565
Ariel Eliorad5afc82013-01-01 05:22:26 +00002566 /* what did mcp say? */
2567 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2568 if (rc) {
2569 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002570 LOAD_ERROR_EXIT(bp, load_error2);
2571 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002572 } else {
2573 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002574 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002575
Ariel Eliorad5afc82013-01-01 05:22:26 +00002576 /* mark pmf if applicable */
2577 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002578
Ariel Eliorad5afc82013-01-01 05:22:26 +00002579 /* Init Function state controlling object */
2580 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002581
Ariel Eliorad5afc82013-01-01 05:22:26 +00002582 /* Initialize HW */
2583 rc = bnx2x_init_hw(bp, load_code);
2584 if (rc) {
2585 BNX2X_ERR("HW init failed, aborting\n");
2586 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2587 LOAD_ERROR_EXIT(bp, load_error2);
2588 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002589 }
2590
Yuval Mintzecf01c22013-04-22 02:53:03 +00002591 bnx2x_pre_irq_nic_init(bp);
2592
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002593 /* Connect to IRQs */
2594 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002595 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002596 BNX2X_ERR("setup irqs failed\n");
2597 if (IS_PF(bp))
2598 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002599 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002600 }
2601
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002602 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002603 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002604 /* Setup NIC internals and enable interrupts */
2605 bnx2x_post_irq_nic_init(bp, load_code);
2606
Ariel Eliorad5afc82013-01-01 05:22:26 +00002607 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002608 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002609
Ariel Eliorad5afc82013-01-01 05:22:26 +00002610 /* Set AFEX default VLAN tag to an invalid value */
2611 bp->afex_def_vlan_tag = -1;
2612 bnx2x_nic_load_afex_dcc(bp, load_code);
2613 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2614 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002615 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002616 BNX2X_ERR("Function start failed!\n");
2617 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2618
Merav Sicron55c11942012-11-07 00:45:48 +00002619 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002620 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002621
Ariel Eliorad5afc82013-01-01 05:22:26 +00002622 /* Send LOAD_DONE command to MCP */
2623 if (!BP_NOMCP(bp)) {
2624 load_code = bnx2x_fw_command(bp,
2625 DRV_MSG_CODE_LOAD_DONE, 0);
2626 if (!load_code) {
2627 BNX2X_ERR("MCP response failure, aborting\n");
2628 rc = -EBUSY;
2629 LOAD_ERROR_EXIT(bp, load_error3);
2630 }
2631 }
2632
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002633 /* initialize FW coalescing state machines in RAM */
2634 bnx2x_update_coalesce(bp);
2635
Ariel Eliorad5afc82013-01-01 05:22:26 +00002636 /* setup the leading queue */
2637 rc = bnx2x_setup_leading(bp);
2638 if (rc) {
2639 BNX2X_ERR("Setup leading failed!\n");
2640 LOAD_ERROR_EXIT(bp, load_error3);
2641 }
2642
2643 /* set up the rest of the queues */
2644 for_each_nondefault_eth_queue(bp, i) {
2645 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2646 if (rc) {
2647 BNX2X_ERR("Queue setup failed\n");
2648 LOAD_ERROR_EXIT(bp, load_error3);
2649 }
2650 }
2651
2652 /* setup rss */
2653 rc = bnx2x_init_rss_pf(bp);
2654 if (rc) {
2655 BNX2X_ERR("PF RSS init failed\n");
2656 LOAD_ERROR_EXIT(bp, load_error3);
2657 }
Ariel Elior8d9ac292013-01-01 05:22:27 +00002658
2659 } else { /* vf */
2660 for_each_eth_queue(bp, i) {
2661 rc = bnx2x_vfpf_setup_q(bp, i);
2662 if (rc) {
2663 BNX2X_ERR("Queue setup failed\n");
2664 LOAD_ERROR_EXIT(bp, load_error3);
2665 }
2666 }
Merav Sicron51c1a582012-03-18 10:33:38 +00002667 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002668
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002669 /* Now that Clients are configured we are ready to work */
2670 bp->state = BNX2X_STATE_OPEN;
2671
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002672 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002673 if (IS_PF(bp))
2674 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002675 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002676 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2677 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002678 if (rc) {
2679 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002680 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002681 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002682
Ariel Eliorad5afc82013-01-01 05:22:26 +00002683 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002684 bnx2x_update_max_mf_config(bp, bp->pending_max);
2685 bp->pending_max = 0;
2686 }
2687
Ariel Eliorad5afc82013-01-01 05:22:26 +00002688 if (bp->port.pmf) {
2689 rc = bnx2x_initial_phy_init(bp, load_mode);
2690 if (rc)
2691 LOAD_ERROR_EXIT(bp, load_error3);
2692 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002693 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002694
2695 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002696
2697 /* Initialize Rx filter. */
2698 netif_addr_lock_bh(bp->dev);
2699 bnx2x_set_rx_mode(bp->dev);
2700 netif_addr_unlock_bh(bp->dev);
2701
2702 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002703 switch (load_mode) {
2704 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002705 /* Tx queue should only be re-enabled */
2706 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002707 break;
2708
2709 case LOAD_OPEN:
2710 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002711 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002712 break;
2713
2714 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002715 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002716 bp->state = BNX2X_STATE_DIAG;
2717 break;
2718
2719 default:
2720 break;
2721 }
2722
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002723 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002724 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002725 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002726 bnx2x__link_status_update(bp);
2727
2728 /* start the timer */
2729 mod_timer(&bp->timer, jiffies + bp->current_interval);
2730
Merav Sicron55c11942012-11-07 00:45:48 +00002731 if (CNIC_ENABLED(bp))
2732 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002733
Ariel Eliorad5afc82013-01-01 05:22:26 +00002734 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2735 /* mark driver as loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002736 u32 val;
2737 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2738 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2739 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2740 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2741 }
2742
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002743 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002744 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002745 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002746 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002747 return -EBUSY;
2748 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002749
Barak Witkowski98768792012-06-19 07:48:31 +00002750 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2751 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2752 bnx2x_dcbx_init(bp, false);
2753
Merav Sicron55c11942012-11-07 00:45:48 +00002754 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2755
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002756 return 0;
2757
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002758#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002759load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002760 if (IS_PF(bp)) {
2761 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002762
Ariel Eliorad5afc82013-01-01 05:22:26 +00002763 /* Clean queueable objects */
2764 bnx2x_squeeze_objects(bp);
2765 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002766
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002767 /* Free SKBs, SGEs, TPA pool and driver internals */
2768 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002769 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002770 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002771
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002772 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002773 bnx2x_free_irq(bp);
2774load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002775 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002776 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2777 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2778 }
2779
2780 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002781load_error1:
2782 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002783 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002784
Ariel Elior889b9af2012-01-26 06:01:51 +00002785 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002786 if (IS_PF(bp))
2787 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002788load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002789 bnx2x_free_fp_mem(bp);
2790 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002791 bnx2x_free_mem(bp);
2792
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002793 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002794#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002795}
2796
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002797int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002798{
2799 u8 rc = 0, cos, i;
2800
2801 /* Wait until tx fastpath tasks complete */
2802 for_each_tx_queue(bp, i) {
2803 struct bnx2x_fastpath *fp = &bp->fp[i];
2804
2805 for_each_cos_in_tx_queue(fp, cos)
2806 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2807 if (rc)
2808 return rc;
2809 }
2810 return 0;
2811}
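/* bnx2x_clean_tx_queue() above is expected to poll until the queue's
 * consumer catches up with its producer and to return non-zero on
 * timeout, so a non-zero rc here means some Tx work never completed.
 */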
2812
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002813/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002814int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002815{
2816 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002817 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002818
Merav Sicron55c11942012-11-07 00:45:48 +00002819 DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2820
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002821 /* mark driver as unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002822 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002823 u32 val;
2824 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2825 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2826 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2827 }
2828
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002829 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002830 (bp->state == BNX2X_STATE_CLOSED ||
2831 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002832 /* We can get here if the driver has been unloaded
2833 * during parity error recovery and is either waiting for a
2834 * leader to complete or for other functions to unload and
2835 * then ifdown has been issued. In this case we want to
2836 * unload and let other functions to complete a recovery
2837 * process.
2838 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002839 bp->recovery_state = BNX2X_RECOVERY_DONE;
2840 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002841 bnx2x_release_leader_lock(bp);
2842 smp_mb();
2843
Merav Sicron51c1a582012-03-18 10:33:38 +00002844 DP(NETIF_MSG_IFDOWN, "Releasing leadership...\n");
2845 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002846 return -EINVAL;
2847 }
2848
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002849 /* Nothing to do during unload if previous bnx2x_nic_load()
2850 * has not completed successfully - all resources are released.
2851 *
2852 * We can get here only after an unsuccessful ndo_* callback, during which
2853 * dev->IFF_UP flag is still on.
2854 */
2855 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2856 return 0;
2857
2858 /* It's important to set bp->state to a value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002859 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2860 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2861 */
2862 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2863 smp_mb();
2864
Merav Sicron55c11942012-11-07 00:45:48 +00002865 if (CNIC_LOADED(bp))
2866 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2867
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002868 /* Stop Tx */
2869 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002870 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002871
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002872 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002873
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002874 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002875
Ariel Eliorad5afc82013-01-01 05:22:26 +00002876 if (IS_PF(bp)) {
2877 /* Set ALWAYS_ALIVE bit in shmem */
2878 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2879 bnx2x_drv_pulse(bp);
2880 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2881 bnx2x_save_statistics(bp);
2882 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002883
Ariel Eliorad5afc82013-01-01 05:22:26 +00002884 /* wait till consumers catch up with producers in all queues */
2885 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002886
Ariel Elior9b176b62013-01-01 05:22:28 +00002887 /* if VF, indicate to PF that this function is going down (PF will delete sp
2888 * elements and clear initializations)
2889 */
2890 if (IS_VF(bp))
2891 bnx2x_vfpf_close_vf(bp);
2892 else if (unload_mode != UNLOAD_RECOVERY)
2893 /* if this is a normal/close unload need to clean up chip*/
Yuval Mintz5d07d862012-09-13 02:56:21 +00002894 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002895 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002896 /* Send the UNLOAD_REQUEST to the MCP */
2897 bnx2x_send_unload_req(bp, unload_mode);
2898
2899 /*
2900 * Prevent transactions to host from the functions on the
2901 * engine that doesn't reset global blocks in case of global
2902 * attention once global blocks are reset and gates are opened
2903 * (the engine whose leader will perform the recovery
2904 * last).
2905 */
2906 if (!CHIP_IS_E1x(bp))
2907 bnx2x_pf_disable(bp);
2908
2909 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002910 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002911 /* Delete all NAPI objects */
2912 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002913 if (CNIC_LOADED(bp))
2914 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002915 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002916 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002917
2918 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002919 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002920 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002921
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002922 /*
2923 * At this stage no more interrupts will arrive so we may safely clean
2924 * the queueable objects here in case they failed to get cleaned so far.
2925 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002926 if (IS_PF(bp))
2927 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002928
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002929 /* There should be no more pending SP commands at this stage */
2930 bp->sp_state = 0;
2931
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002932 bp->port.pmf = 0;
2933
2934 /* Free SKBs, SGEs, TPA pool and driver internals */
2935 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002936 if (CNIC_LOADED(bp))
2937 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002938 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002939 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002940
Ariel Eliorad5afc82013-01-01 05:22:26 +00002941 bnx2x_free_fp_mem(bp);
2942 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00002943 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002944
Ariel Eliorad5afc82013-01-01 05:22:26 +00002945 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002946 if (CNIC_LOADED(bp))
2947 bnx2x_free_mem_cnic(bp);
Yuval Mintz2f7a3122013-04-24 01:45:01 +00002948 bnx2x_free_mem(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002949 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002950 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00002951 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002952
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002953 /* Check if there are pending parity attentions. If there are - set
2954 * RECOVERY_IN_PROGRESS.
2955 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002956 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002957 bnx2x_set_reset_in_progress(bp);
2958
2959 /* Set RESET_IS_GLOBAL if needed */
2960 if (global)
2961 bnx2x_set_reset_global(bp);
2962 }
2963
2964
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002965 /* The last driver must disable a "close the gate" if there is no
2966 * parity attention or "process kill" pending.
2967 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002968 if (IS_PF(bp) &&
2969 !bnx2x_clear_pf_load(bp) &&
2970 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002971 bnx2x_disable_close_the_gate(bp);
2972
Merav Sicron55c11942012-11-07 00:45:48 +00002973 DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
2974
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002975 return 0;
2976}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002977
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002978int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2979{
2980 u16 pmcsr;
2981
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002982 /* If there is no power capability, silently succeed */
2983 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002984 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002985 return 0;
2986 }
2987
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002988 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2989
2990 switch (state) {
2991 case PCI_D0:
2992 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2993 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2994 PCI_PM_CTRL_PME_STATUS));
2995
2996 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2997 /* delay required during transition out of D3hot */
2998 msleep(20);
2999 break;
3000
3001 case PCI_D3hot:
3002 /* If there are other clients above, don't
3003 shut down the power */
3004 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3005 return 0;
3006 /* Don't shut down the power for emulation and FPGA */
3007 if (CHIP_REV_IS_SLOW(bp))
3008 return 0;
3009
3010 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3011 pmcsr |= 3;
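/* 3 is the D3hot encoding of the PCI_PM_CTRL power-state field
 * (bits 1:0), i.e. this is equivalent to writing PCI_D3hot there.
 */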
3012
3013 if (bp->wol)
3014 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3015
3016 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3017 pmcsr);
3018
3019 /* No more memory access after this point until
3020 * device is brought back to D0.
3021 */
3022 break;
3023
3024 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003025 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003026 return -EINVAL;
3027 }
3028 return 0;
3029}
3030
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003031/*
3032 * net_device service functions
3033 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003034int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003035{
3036 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003037 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003038 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3039 napi);
3040 struct bnx2x *bp = fp->bp;
3041
3042 while (1) {
3043#ifdef BNX2X_STOP_ON_ERROR
3044 if (unlikely(bp->panic)) {
3045 napi_complete(napi);
3046 return 0;
3047 }
3048#endif
3049
Ariel Elior6383c0b2011-07-14 08:31:57 +00003050 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003051 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3052 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003053
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003054 if (bnx2x_has_rx_work(fp)) {
3055 work_done += bnx2x_rx_int(fp, budget - work_done);
3056
3057 /* must not complete if we consumed full budget */
3058 if (work_done >= budget)
3059 break;
3060 }
3061
3062 /* Fall out from the NAPI loop if needed */
3063 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003064
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003065 /* No need to update SB for FCoE L2 ring as long as
3066 * it's connected to the default SB and the SB
3067 * has been updated when NAPI was scheduled.
3068 */
3069 if (IS_FCOE_FP(fp)) {
3070 napi_complete(napi);
3071 break;
3072 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003073 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003074 /* bnx2x_has_rx_work() reads the status block,
3075 * thus we need to ensure that status block indices
3076 * have been actually read (bnx2x_update_fpsb_idx)
3077 * prior to this check (bnx2x_has_rx_work) so that
3078 * we won't write the "newer" value of the status block
3079 * to IGU (if there was a DMA right after
3080 * bnx2x_has_rx_work and if there is no rmb, the memory
3081 * reading (bnx2x_update_fpsb_idx) may be postponed
3082 * to right before bnx2x_ack_sb). In this case there
3083 * will never be another interrupt until there is
3084 * another update of the status block, while there
3085 * is still unhandled work.
3086 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003087 rmb();
3088
3089 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3090 napi_complete(napi);
3091 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003092 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003093 "Update index to %d\n", fp->fp_hc_idx);
3094 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3095 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003096 IGU_INT_ENABLE, 1);
3097 break;
3098 }
3099 }
3100 }
3101
3102 return work_done;
3103}
3104
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003105/* we split the first BD into header and data BDs
3106 * to ease the pain of our fellow microcode engineers;
3107 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003108 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003109static u16 bnx2x_tx_split(struct bnx2x *bp,
3110 struct bnx2x_fp_txdata *txdata,
3111 struct sw_tx_bd *tx_buf,
3112 struct eth_tx_start_bd **tx_bd, u16 hlen,
3113 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003114{
3115 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3116 struct eth_tx_bd *d_tx_bd;
3117 dma_addr_t mapping;
3118 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3119
3120 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003121 h_tx_bd->nbytes = cpu_to_le16(hlen);
3122
Dmitry Kravkov91226792013-03-11 05:17:52 +00003123 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3124 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003125
3126 /* now get a new data BD
3127 * (after the pbd) and fill it */
3128 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003129 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003130
3131 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3132 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3133
3134 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3135 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3136 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3137
3138 /* this marks the BD as one that has no individual mapping */
3139 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3140
3141 DP(NETIF_MSG_TX_QUEUED,
3142 "TSO split data size is %d (%x:%x)\n",
3143 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3144
3145 /* update tx_bd */
3146 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3147
3148 return bd_prod;
3149}
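/* Illustrative example: for a TSO packet with a 54-byte Eth+IPv4+TCP
 * header and skb_headlen() of 1514, hlen is 54, so the start BD is
 * trimmed to nbytes = 54 and the new data BD covers the remaining
 * 1460 bytes at mapping + 54 - both BDs share one DMA mapping, which
 * is why the buffer is flagged BNX2X_TSO_SPLIT_BD above.
 */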
3150
Yuval Mintz86564c32013-01-23 03:21:50 +00003151#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3152#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003153static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003154{
Yuval Mintz86564c32013-01-23 03:21:50 +00003155 __sum16 tsum = (__force __sum16) csum;
3156
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003157 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003158 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3159 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003160
3161 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003162 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3163 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003164
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003165 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003166}
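/* In other words (a hedged reading): bnx2x_csum_fix() removes
 * (fix > 0) the one's-complement contribution of the fix bytes just
 * before t_header, or adds (fix < 0) that of the -fix bytes starting
 * at it, and returns the adjusted sum byte-swapped for the parsing
 * BD. Callers pass fix = SKB_CS_OFF(skb) to compensate for checksum
 * fields that are not at the TCP offset the HW assumes.
 */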
3167
Dmitry Kravkov91226792013-03-11 05:17:52 +00003168static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003169{
3170 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003171 __u8 prot = 0;
3172 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003173
3174 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003175 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003176
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003177 protocol = vlan_get_protocol(skb);
3178 if (protocol == htons(ETH_P_IPV6)) {
3179 rc = XMIT_CSUM_V6;
3180 prot = ipv6_hdr(skb)->nexthdr;
3181 } else {
3182 rc = XMIT_CSUM_V4;
3183 prot = ip_hdr(skb)->protocol;
3184 }
3185
3186 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3187 if (inner_ip_hdr(skb)->version == 6) {
3188 rc |= XMIT_CSUM_ENC_V6;
3189 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003190 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003191 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003192 rc |= XMIT_CSUM_ENC_V4;
3193 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003194 rc |= XMIT_CSUM_TCP;
3195 }
3196 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003197 if (prot == IPPROTO_TCP)
3198 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003199
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003200 if (skb_is_gso_v6(skb)) {
3201 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
3202 if (rc & XMIT_CSUM_ENC)
3203 rc |= XMIT_GSO_ENC_V6;
3204 } else if (skb_is_gso(skb)) {
3205 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
3206 if (rc & XMIT_CSUM_ENC)
3207 rc |= XMIT_GSO_ENC_V4;
3208 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003209
3210 return rc;
3211}
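/* Illustrative example: for a GSO TCP/IPv4 packet tunneled in
 * UDP/IPv4 on a 57712+ chip, the walk above should yield
 * XMIT_CSUM_V4 (outer) | XMIT_CSUM_ENC_V4 | XMIT_CSUM_TCP (inner) |
 * XMIT_GSO_V4 | XMIT_GSO_ENC_V4 - assuming XMIT_CSUM_ENC matches
 * both _ENC_ flavors, as the flag names suggest.
 */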
3212
3213#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3214/* check if packet requires linearization (packet is too fragmented)
3215 no need to check fragmentation if page size > 8K (there will be no
3216 violation of FW restrictions) */
3217static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3218 u32 xmit_type)
3219{
3220 int to_copy = 0;
3221 int hlen = 0;
3222 int first_bd_sz = 0;
3223
3224 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3225 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3226
3227 if (xmit_type & XMIT_GSO) {
3228 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3229 /* Check if LSO packet needs to be copied:
3230 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3231 int wnd_size = MAX_FETCH_BD - 3;
3232 /* Number of windows to check */
3233 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3234 int wnd_idx = 0;
3235 int frag_idx = 0;
3236 u32 wnd_sum = 0;
3237
3238 /* Headers length */
3239 hlen = (int)(skb_transport_header(skb) - skb->data) +
3240 tcp_hdrlen(skb);
3241
3242 /* Amount of data (w/o headers) on linear part of SKB*/
3243 first_bd_sz = skb_headlen(skb) - hlen;
3244
3245 wnd_sum = first_bd_sz;
3246
3247 /* Calculate the first sum - it's special */
3248 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3249 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003250 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003251
3252 /* If there was data on linear skb data - check it */
3253 if (first_bd_sz > 0) {
3254 if (unlikely(wnd_sum < lso_mss)) {
3255 to_copy = 1;
3256 goto exit_lbl;
3257 }
3258
3259 wnd_sum -= first_bd_sz;
3260 }
3261
3262 /* Others are easier: run through the frag list and
3263 check all windows */
3264 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3265 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003266 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003267
3268 if (unlikely(wnd_sum < lso_mss)) {
3269 to_copy = 1;
3270 break;
3271 }
3272 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003273 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003274 }
3275 } else {
3276 /* in non-LSO too fragmented packet should always
3277 be linearized */
3278 to_copy = 1;
3279 }
3280 }
3281
3282exit_lbl:
3283 if (unlikely(to_copy))
3284 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003285 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003286 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3287 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3288
3289 return to_copy;
3290}
3291#endif
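/* Worked example, assuming MAX_FETCH_BD is 13 (so wnd_size is 10):
 * a GSO skb with 15 frags is linearized if any window of 10
 * consecutive BDs - the first one starting with the linear part -
 * carries less than one gso_size of payload, since the FW fetches at
 * most MAX_FETCH_BD BDs per packet.
 */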
3292
Dmitry Kravkov91226792013-03-11 05:17:52 +00003293static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3294 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003295{
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003296 struct ipv6hdr *ipv6;
3297
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003298 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3299 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3300 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003301
3302 if (xmit_type & XMIT_GSO_ENC_V6)
3303 ipv6 = inner_ipv6_hdr(skb);
3304 else if (xmit_type & XMIT_GSO_V6)
3305 ipv6 = ipv6_hdr(skb);
3306 else
3307 ipv6 = NULL;
3308
3309 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003310 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003311}
3312
3313/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003314 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003315 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003316 * @skb: packet skb
3317 * @pbd: parse BD
3318 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003319 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003320static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3321 struct eth_tx_parse_bd_e1x *pbd,
Yuval Mintz057cf652013-05-19 04:41:01 +00003322 struct eth_tx_start_bd *tx_start_bd,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003323 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003324{
3325 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003326 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003327 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003328
3329 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003330 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003331 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003332 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3333 ip_hdr(skb)->daddr,
3334 0, IPPROTO_TCP, 0));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003335
Yuval Mintz057cf652013-05-19 04:41:01 +00003336 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3337 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3338 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003339 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003340 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3341 &ipv6_hdr(skb)->daddr,
3342 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003343 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003344
Yuval Mintz86564c32013-01-23 03:21:50 +00003345 pbd->global_data |=
3346 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003347}
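/* Note: the pseudo checksums above are deliberately computed with a
 * zero length, matching the ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN
 * flag set at the end of the function - presumably so the FW can fold
 * the per-segment length in by itself while segmenting.
 */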
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003348
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003349/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003350 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3351 *
3352 * @bp: driver handle
3353 * @skb: packet skb
3354 * @parsing_data: data to be updated
3355 * @xmit_type: xmit flags
3356 *
3357 * 57712/578xx related, when skb has encapsulation
3358 */
3359static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3360 u32 *parsing_data, u32 xmit_type)
3361{
3362 *parsing_data |=
3363 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3364 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3365 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3366
3367 if (xmit_type & XMIT_CSUM_TCP) {
3368 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3369 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3370 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3371
3372 return skb_inner_transport_header(skb) +
3373 inner_tcp_hdrlen(skb) - skb->data;
3374 }
3375
3376 /* We support checksum offload for TCP and UDP only.
3377 * No need to pass the UDP header length - it's a constant.
3378 */
3379 return skb_inner_transport_header(skb) +
3380 sizeof(struct udphdr) - skb->data;
3381}
3382
3383/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003384 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003385 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003386 * @bp: driver handle
3387 * @skb: packet skb
3388 * @parsing_data: data to be updated
3389 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003390 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003391 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003392 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003393static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3394 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003395{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003396 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003397 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003398 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3399 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003400
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003401 if (xmit_type & XMIT_CSUM_TCP) {
3402 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3403 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3404 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003405
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003406 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003407 }
3408 /* We support checksum offload for TCP and UDP only.
3409 * No need to pass the UDP header length - it's a constant.
3410 */
3411 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003412}
3413
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003414/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003415static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3416 struct eth_tx_start_bd *tx_start_bd,
3417 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003418{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003419 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3420
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003421 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003422 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003423
3424 if (!(xmit_type & XMIT_CSUM_TCP))
3425 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003426}
3427
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003428/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003429 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003430 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003431 * @bp: driver handle
3432 * @skb: packet skb
3433 * @pbd: parse BD to be updated
3434 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003435 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003436static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3437 struct eth_tx_parse_bd_e1x *pbd,
3438 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003439{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003440 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003441
3442 /* for now NS flag is not used in Linux */
3443 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003444 cpu_to_le16(hlen |
3445 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3446 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003447
3448 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003449 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003450
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003451 hlen += pbd->ip_hlen_w;
3452
3453 /* We support checksum offload for TCP and UDP only */
3454 if (xmit_type & XMIT_CSUM_TCP)
3455 hlen += tcp_hdrlen(skb) / 2;
3456 else
3457 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003458
3459 pbd->total_hlen_w = cpu_to_le16(hlen);
3460 hlen = hlen*2;
3461
3462 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003463 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003464
3465 } else {
3466 s8 fix = SKB_CS_OFF(skb); /* signed! */
3467
3468 DP(NETIF_MSG_TX_QUEUED,
3469 "hlen %d fix %d csum before fix %x\n",
3470 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3471
3472 /* HW bug: fixup the CSUM */
3473 pbd->tcp_pseudo_csum =
3474 bnx2x_csum_fix(skb_transport_header(skb),
3475 SKB_CS(skb), fix);
3476
3477 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3478 pbd->tcp_pseudo_csum);
3479 }
3480
3481 return hlen;
3482}
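/* Worked example (illustrative): for an untagged TCP/IPv4 frame,
 * skb_network_header(skb) - skb->data is 14, so hlen starts at 7
 * words; a 20-byte IP header makes ip_hlen_w = 10 and a 20-byte TCP
 * header adds 10 more, giving total_hlen_w = 27 words, returned as
 * 54 bytes after the hlen*2 above.
 */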
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003483
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003484static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3485 struct eth_tx_parse_bd_e2 *pbd_e2,
3486 struct eth_tx_parse_2nd_bd *pbd2,
3487 u16 *global_data,
3488 u32 xmit_type)
3489{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003490 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003491 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove287a752013-03-21 15:38:24 +00003492 /* from outer IP to transport */
3493 hlen_w = (skb_inner_transport_header(skb) -
3494 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003495
3496 /* transport len */
3497 if (xmit_type & XMIT_CSUM_TCP)
Dmitry Kravkove287a752013-03-21 15:38:24 +00003498 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003499 else
Dmitry Kravkove287a752013-03-21 15:38:24 +00003500 hlen_w += sizeof(struct udphdr) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003501
Dmitry Kravkove287a752013-03-21 15:38:24 +00003502 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003503
3504 if (xmit_type & XMIT_CSUM_ENC_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003505 struct iphdr *iph = ip_hdr(skb);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003506 pbd2->fw_ip_csum_wo_len_flags_frag =
3507 bswab16(csum_fold((~iph->check) -
3508 iph->tot_len - iph->frag_off));
3509 } else {
3510 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003511 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003512 }
3513
3514 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3515
3516 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3517
3518 if (xmit_type & XMIT_GSO_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003519 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003520
3521 pbd_e2->data.tunnel_data.pseudo_csum =
3522 bswab16(~csum_tcpudp_magic(
3523 inner_ip_hdr(skb)->saddr,
3524 inner_ip_hdr(skb)->daddr,
3525 0, IPPROTO_TCP, 0));
3526
3527 outerip_len = ip_hdr(skb)->ihl << 1;
3528 } else {
3529 pbd_e2->data.tunnel_data.pseudo_csum =
3530 bswab16(~csum_ipv6_magic(
3531 &inner_ipv6_hdr(skb)->saddr,
3532 &inner_ipv6_hdr(skb)->daddr,
3533 0, IPPROTO_TCP, 0));
3534 }
3535
3536 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3537
3538 *global_data |=
3539 outerip_off |
3540 (!!(xmit_type & XMIT_CSUM_V6) <<
3541 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3542 (outerip_len <<
3543 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3544 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3545 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003546
3547 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3548 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3549 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3550 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003551}
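/* Illustrative sizing, assuming an 8-byte VXLAN-like tunnel header:
 * outer IPv4 (20) + UDP (8) + tunnel header (8) + inner Ethernet (14)
 * + inner IPv4 (20) + inner TCP (20) puts 90 bytes between the outer
 * network header and the inner payload, so hlen_w above comes out to
 * 45 words.
 */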
3552
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003553/* called with netif_tx_lock
3554 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3555 * netif_wake_queue()
3556 */
3557netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3558{
3559 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003560
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003561 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003562 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003563 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003564 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003565 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003566 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003567 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003568 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003569 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003570 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003571 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003572 dma_addr_t mapping;
3573 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3574 int i;
3575 u8 hlen = 0;
3576 __le16 pkt_size = 0;
3577 struct ethhdr *eth;
3578 u8 mac_type = UNICAST_ADDRESS;
3579
3580#ifdef BNX2X_STOP_ON_ERROR
3581 if (unlikely(bp->panic))
3582 return NETDEV_TX_BUSY;
3583#endif
3584
Ariel Elior6383c0b2011-07-14 08:31:57 +00003585 txq_index = skb_get_queue_mapping(skb);
3586 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003587
Merav Sicron55c11942012-11-07 00:45:48 +00003588 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003589
Merav Sicron65565882012-06-19 07:48:26 +00003590 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003591
3592 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003593 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003594 txq_index, fp_index, txdata_index); */
3595
Ariel Elior6383c0b2011-07-14 08:31:57 +00003596 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003597 DP(NETIF_MSG_TX_QUEUED,
3598 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003599 txdata->cid, fp_index, txdata_index, txdata, fp); */
3600
3601 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003602 skb_shinfo(skb)->nr_frags +
3603 BDS_PER_TX_PKT +
3604 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003605 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003606 if (txdata->tx_ring_size == 0) {
3607 struct bnx2x_eth_q_stats *q_stats =
3608 bnx2x_fp_qstats(bp, txdata->parent_fp);
3609 q_stats->driver_filtered_tx_pkt++;
3610 dev_kfree_skb(skb);
3611 return NETDEV_TX_OK;
3612 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003613 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3614 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003615 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003616
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003617 return NETDEV_TX_BUSY;
3618 }
3619
Merav Sicron51c1a582012-03-18 10:33:38 +00003620 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003621 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003622 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003623 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3624 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003625
3626 eth = (struct ethhdr *)skb->data;
3627
3628 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3629 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3630 if (is_broadcast_ether_addr(eth->h_dest))
3631 mac_type = BROADCAST_ADDRESS;
3632 else
3633 mac_type = MULTICAST_ADDRESS;
3634 }
3635
Dmitry Kravkov91226792013-03-11 05:17:52 +00003636#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003637 /* First, check if we need to linearize the skb (due to FW
3638 restrictions). No need to check fragmentation if page size > 8K
3639 (there will be no violation of FW restrictions) */
3640 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3641 /* Statistics of linearization */
3642 bp->lin_cnt++;
3643 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003644 DP(NETIF_MSG_TX_QUEUED,
3645 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003646 dev_kfree_skb_any(skb);
3647 return NETDEV_TX_OK;
3648 }
3649 }
3650#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003651 /* Map skb linear data for DMA */
3652 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3653 skb_headlen(skb), DMA_TO_DEVICE);
3654 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003655 DP(NETIF_MSG_TX_QUEUED,
3656 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003657 dev_kfree_skb_any(skb);
3658 return NETDEV_TX_OK;
3659 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003660 /*
3661 Please read carefully. First we use one BD which we mark as start,
3662 then we have a parsing info BD (used for TSO or xsum),
3663 and only then we have the rest of the TSO BDs.
3664 (don't forget to mark the last one as last,
3665 and to unmap only AFTER you write to the BD ...)
3666 And above all, all pbd sizes are in words - NOT DWORDS!
3667 */
3668
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003669 /* get current pkt produced now - advance it just before sending packet
3670 * since mapping of pages may fail and cause packet to be dropped
3671 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003672 pkt_prod = txdata->tx_pkt_prod;
3673 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003674
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003675 /* get a tx_buf and first BD
3676 * tx_start_bd may be changed during SPLIT,
3677 * but first_bd will always stay first
3678 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003679 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3680 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003681 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003682
3683 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003684
Dmitry Kravkov91226792013-03-11 05:17:52 +00003685 /* header nbd: indirectly zero other flags! */
3686 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003687
3688 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003689 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003690 tx_buf->skb = skb;
3691 tx_buf->flags = 0;
3692
3693 DP(NETIF_MSG_TX_QUEUED,
3694 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003695 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003696
Jesse Grosseab6d182010-10-20 13:56:03 +00003697 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003698 tx_start_bd->vlan_or_ethertype =
3699 cpu_to_le16(vlan_tx_tag_get(skb));
3700 tx_start_bd->bd_flags.as_bitfield |=
3701 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003702 } else {
3703 /* when transmitting in a vf, start bd must hold the ethertype
3704 * for fw to enforce it
3705 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003706 if (IS_VF(bp))
Ariel Eliordc1ba592013-01-01 05:22:30 +00003707 tx_start_bd->vlan_or_ethertype =
3708 cpu_to_le16(ntohs(eth->h_proto));
Dmitry Kravkov91226792013-03-11 05:17:52 +00003709 else
Ariel Eliordc1ba592013-01-01 05:22:30 +00003710 /* used by FW for packet accounting */
3711 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003712 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003713
Dmitry Kravkov91226792013-03-11 05:17:52 +00003714 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3715
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003716 /* turn on parsing and get a BD */
3717 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003718
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003719 if (xmit_type & XMIT_CSUM)
3720 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003721
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003722 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003723 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003724 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003725
3726 if (xmit_type & XMIT_CSUM_ENC) {
3727 u16 global_data = 0;
3728
3729 /* Set PBD in enc checksum offload case */
3730 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3731 &pbd_e2_parsing_data,
3732 xmit_type);
3733
3734 /* turn on 2nd parsing and get a BD */
3735 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3736
3737 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3738
3739 memset(pbd2, 0, sizeof(*pbd2));
3740
3741 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3742 (skb_inner_network_header(skb) -
3743 skb->data) >> 1;
3744
3745 if (xmit_type & XMIT_GSO_ENC)
3746 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3747 &global_data,
3748 xmit_type);
3749
3750 pbd2->global_data = cpu_to_le16(global_data);
3751
3752 /* add an additional parse BD indication to the start BD */
3753 SET_FLAG(tx_start_bd->general_data,
3754 ETH_TX_START_BD_PARSE_NBDS, 1);
3755 /* set encapsulation flag in start BD */
3756 SET_FLAG(tx_start_bd->general_data,
3757 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3758 nbd++;
3759 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00003760 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003761 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3762 &pbd_e2_parsing_data,
3763 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003764 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00003765
Dmitry Kravkov91226792013-03-11 05:17:52 +00003766 /* Add the MACs to the parsing BD if this is a VF */
3767 if (IS_VF(bp)) {
3768 /* override GRE parameters in BD */
3769 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3770 &pbd_e2->data.mac_addr.src_mid,
3771 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003772 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003773
3774 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3775 &pbd_e2->data.mac_addr.dst_mid,
3776 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003777 eth->h_dest);
3778 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003779
3780 SET_FLAG(pbd_e2_parsing_data,
3781 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003782 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003783 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003784 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003785 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3786 /* Set PBD in checksum offload case */
3787 if (xmit_type & XMIT_CSUM)
3788 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003789
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003790 SET_FLAG(global_data,
3791 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3792 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003793 }
3794
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003795 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003796 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3797 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003798 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3799 pkt_size = tx_start_bd->nbytes;
3800
Merav Sicron51c1a582012-03-18 10:33:38 +00003801 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003802 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003803 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003804 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003805 tx_start_bd->bd_flags.as_bitfield,
3806 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003807
3808 if (xmit_type & XMIT_GSO) {
3809
3810 DP(NETIF_MSG_TX_QUEUED,
3811 "TSO packet len %d hlen %d total len %d tso size %d\n",
3812 skb->len, hlen, skb_headlen(skb),
3813 skb_shinfo(skb)->gso_size);
3814
3815 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3816
Dmitry Kravkov91226792013-03-11 05:17:52 +00003817 if (unlikely(skb_headlen(skb) > hlen)) {
3818 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003819 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3820 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003821 bd_prod);
3822 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003823 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003824 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3825 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003826 else
Yuval Mintz057cf652013-05-19 04:41:01 +00003827 bnx2x_set_pbd_gso(skb, pbd_e1x, tx_start_bd,
3828 xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003829 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003830
3831 /* Set the PBD's parsing_data field if not zero
3832 * (for the chips newer than 57711).
3833 */
3834 if (pbd_e2_parsing_data)
3835 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3836
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003837 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3838
	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd needs to be properly updated
			 * before the call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However, we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in the non-LSO
	 * case, which we care about much more.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2,
		   pbd_e2->data.mac_addr.dst_hi,
		   pbd_e2->data.mac_addr.dst_mid,
		   pbd_e2->data.mac_addr.dst_lo,
		   pbd_e2->data.mac_addr.src_hi,
		   pbd_e2->data.mac_addr.src_mid,
		   pbd_e2->data.mac_addr.src_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}
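
/* A minimal consumer-side sketch (illustrative, not part of this file's
 * logic): the smp_mb() in the stop path above pairs with a barrier in
 * bnx2x_tx_int(), whose wake-up check looks roughly like this. The
 * function name is hypothetical; the real logic lives in bnx2x_tx_int().
 */
#if 0
static void bnx2x_tx_wake_sketch(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata,
				 struct netdev_queue *txq)
{
	smp_mb();	/* pairs with the barrier in the xmit stop path */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
		netif_tx_wake_queue(txq);
}
#endif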

/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @dev: net device to configure
 * @num_tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested - abort */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes.
	   This can be used for ETS or PFC, and saves the effort of setting
	   up a multi-class queueing discipline or negotiating DCBX with a
	   switch:
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
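
/* Usage sketch (illustrative): bnx2x_setup_tc() is reached through the
 * .ndo_setup_tc hook, typically when an mqprio qdisc is configured, e.g.
 * "tc qdisc add dev eth0 root mqprio num_tc 3 ...". The ops table below
 * is a hypothetical fragment; the driver's real net_device_ops table is
 * defined elsewhere.
 */
#if 0
static const struct net_device_ops bnx2x_netdev_ops_sketch = {
	.ndo_setup_tc	= bnx2x_setup_tc,
};
#endif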

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
	    !is_zero_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}

static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}

void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;

	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
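
/* Illustrative note: the last CQE of each RCQ page is reserved as a
 * "next page" pointer, so page i links to page (i % NUM_RCQ_RINGS) and
 * the final page wraps back to page 0, forming a ring of pages. A tiny
 * sketch of the link address computed above (helper name hypothetical):
 */
#if 0
static dma_addr_t bnx2x_rcq_next_page_addr_sketch(struct bnx2x_fastpath *fp,
						  int i)
{
	/* i runs 1..NUM_RCQ_RINGS, matching the loop in
	 * bnx2x_set_next_page_rx_cq()
	 */
	return fp->rx_comp_mapping + BCM_PAGE_SIZE * (i % NUM_RCQ_RINGS);
}
#endif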

static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least the number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
					&bnx2x_fp(bp, index, status_blk_mapping),
					sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
					&bnx2x_fp(bp, index, status_blk_mapping),
					sizeof(struct host_hc_status_block_e1x));
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				    sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
					&txdata->tx_desc_mapping,
					sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough;
	 * in these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}

int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of
			 * marking NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP;
			 * must be done in that order:
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
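
/* Illustrative expansion (an assumption about the HILO_U64 helper, kept
 * hypothetical here): the 64-bit WWN is built with the hi word in the
 * upper 32 bits and the lo word in the lower 32 bits.
 */
#if 0
static u64 bnx2x_wwn_from_hilo_sketch(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;	/* assumed HILO_U64 semantics */
}
#endif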

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
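
/* Usage sketch (illustrative) for bnx2x_update_coalesce_sb_index() below:
 * this is the kind of per-index call the coalescing-configuration path
 * makes. 'usec' is rounded down to BNX2X_BTR-sized ticks, and usec == 0
 * disables coalescing for that status-block index. HC_INDEX_ETH_RX_CQ_CONS
 * is assumed to be the Rx completion index used elsewhere in the driver.
 */
#if 0
static void bnx2x_coalesce_usage_sketch(struct bnx2x *bp, u8 fw_sb_id)
{
	/* e.g. ~48 usec of interrupt coalescing on the Rx CQ index */
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
				       0, 48);
}
#endif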

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);