/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the content of bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

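	/* Illustrative example (added, not from the original source): with
	 * 8 ETH queues and max_cos = 3, the FCoE txdata sits at index
	 * 8 * 3 + FCOE_TXQ_IDX_OFFSET.  Moving the FCoE fp down by
	 * delta = from - to queues lowers that index by max_cos * delta,
	 * which is what the recomputation below encodes.
	 */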
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
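	/* Illustrative example (added): with old_eth_num = 4 and delta = 1,
	 * the txdata for (cos = 1, queue 0) moves from index 1 * 4 + 0 = 4
	 * down to 1 * 3 + 0 = 3.  Walking the queues in ascending order
	 * guarantees that any slot being written has already been copied
	 * out, so no live txdata entry is clobbered.
	 */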
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

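	/* Descriptive note (added): nbd counts every BD of the packet,
	 * including the start BD just unmapped above.  The parse BD and the
	 * TSO split header BD below carry no DMA mapping of their own, so
	 * they are skipped rather than unmapped.
	 */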
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
				    RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
				  le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

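	/* Descriptive note (added): delta grows only in whole
	 * BIT_VEC64_ELEM_SZ steps, one per fully consumed 64-bit mask
	 * element, so the SGE producer below advances 64 entries at a time.
	 */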
	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Return the Toeplitz hash value taken from the CQE
 * (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * The approximate value of the MSS for this aggregation, calculated
 * from its first packet.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

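	/* Worked example (added, illustrative): for an IPv4 aggregation
	 * whose segments carry timestamps, hdrs_len = 14 (ETH_HLEN) +
	 * 20 (iphdr) + 20 (tcphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, so a
	 * first packet with len_on_bd = 1514 yields gso_size = 1448.
	 */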
	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
					       skb_shinfo(skb)->gso_size);
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that "next" element will consume 2 indices)
		 */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
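			/* Descriptive note (added): skb_fill_page_desc()
			 * does not take a page reference.  The first frag
			 * consumes the reference the ring already held, and
			 * every further frag carved from the same page needs
			 * its own get_page() below.
			 */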
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

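/* Descriptive note (added): rx_frag_size is non-zero when the rx buffer is
 * small enough to be served from the page-frag allocator; otherwise the
 * driver falls back to plain kmalloc()/kfree().  bnx2x_frag_free() above and
 * bnx2x_frag_alloc() below must agree on which scheme is in use.
 */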
static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

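	/* Descriptive note (added): seed th->check with the complement of
	 * the TCP pseudo-header checksum so that tcp_gro_complete() can
	 * finish the packet in the same state a software-GRO'd skb would
	 * be in.
	 */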
	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_set_network_header(skb, 0);
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_ip_csum(bp, skb);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_ipv6_csum(bp, skb);
			break;
		default:
			BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
		tcp_gro_complete(skb);
	}
#endif
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	 * that's why it's ok here
	 */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
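		/* Descriptive note (added): copying small frames into a
		 * fresh skb lets the large ring buffer be recycled in place
		 * via bnx2x_reuse_rx_data() instead of being unmapped and
		 * replaced, which is cheaper for short packets.
		 */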
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

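	/* Descriptive note (added): the last two entries of each SGE ring
	 * page are reserved as a "next page" pointer, so the element at
	 * RX_SGE_CNT * i - 2 of every page is pointed at the base of the
	 * following page, wrapping back to the first page at the end.
	 */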
1231 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1232 struct eth_rx_sge *sge;
1233
1234 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1235 sge->addr_hi =
1236 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1237 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1238
1239 sge->addr_lo =
1240 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1241 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1242 }
1243}
1244
1245static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1246 struct bnx2x_fastpath *fp, int last)
1247{
1248 int i;
1249
1250 for (i = 0; i < last; i++) {
1251 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1252 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1253 u8 *data = first_buf->data;
1254
1255 if (data == NULL) {
1256 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1257 continue;
1258 }
1259 if (tpa_info->tpa_state == BNX2X_TPA_START)
1260 dma_unmap_single(&bp->pdev->dev,
1261 dma_unmap_addr(first_buf, mapping),
1262 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001263 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001264 first_buf->data = NULL;
1265 }
1266}
1267
Merav Sicron55c11942012-11-07 00:45:48 +00001268void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1269{
1270 int j;
1271
1272 for_each_rx_queue_cnic(bp, j) {
1273 struct bnx2x_fastpath *fp = &bp->fp[j];
1274
1275 fp->rx_bd_cons = 0;
1276
1277 /* Activate BD ring */
1278 /* Warning!
1279 * this will generate an interrupt (to the TSTORM)
1280 * must only be done after chip is initialized
1281 */
1282 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1283 fp->rx_sge_prod);
1284 }
1285}
1286
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001287void bnx2x_init_rx_rings(struct bnx2x *bp)
1288{
1289 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001290 u16 ring_prod;
1291 int i, j;
1292
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001293 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001294 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001295 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001296
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001297 DP(NETIF_MSG_IFUP,
1298 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1299
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001300 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001301 /* Fill the per-aggregtion pool */
David S. Miller8decf862011-09-22 03:23:13 -04001302 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001303 struct bnx2x_agg_info *tpa_info =
1304 &fp->tpa_info[i];
1305 struct sw_rx_bd *first_buf =
1306 &tpa_info->first_buf;
1307
Eric Dumazetd46d1322012-12-10 12:16:06 +00001308 first_buf->data = bnx2x_frag_alloc(fp);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001309 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001310 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1311 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001312 bnx2x_free_tpa_pool(bp, fp, i);
1313 fp->disable_tpa = 1;
1314 break;
1315 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001316 dma_unmap_addr_set(first_buf, mapping, 0);
1317 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001318 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001319
1320 /* "next page" elements initialization */
1321 bnx2x_set_next_page_sgl(fp);
1322
1323 /* set SGEs bit mask */
1324 bnx2x_init_sge_ring_bit_mask(fp);
1325
1326 /* Allocate SGEs and initialize the ring elements */
1327 for (i = 0, ring_prod = 0;
1328 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1329
1330 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001331 BNX2X_ERR("was only able to allocate %d rx sges\n",
1332 i);
1333 BNX2X_ERR("disabling TPA for queue[%d]\n",
1334 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001335 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001336 bnx2x_free_rx_sge_range(bp, fp,
1337 ring_prod);
1338 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001339 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001340 fp->disable_tpa = 1;
1341 ring_prod = 0;
1342 break;
1343 }
1344 ring_prod = NEXT_SGE_IDX(ring_prod);
1345 }
1346
1347 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001348 }
1349 }
1350
Merav Sicron55c11942012-11-07 00:45:48 +00001351 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001352 struct bnx2x_fastpath *fp = &bp->fp[j];
1353
1354 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001355
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001356 /* Activate BD ring */
1357 /* Warning!
1358 * this will generate an interrupt (to the TSTORM);
1359 * it must only be done after the chip is initialized
1360 */
1361 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1362 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001363
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001364 if (j != 0)
1365 continue;
1366
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001367 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001368 REG_WR(bp, BAR_USTRORM_INTMEM +
1369 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1370 U64_LO(fp->rx_comp_mapping));
1371 REG_WR(bp, BAR_USTRORM_INTMEM +
1372 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1373 U64_HI(fp->rx_comp_mapping));
1374 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001375 }
1376}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001377
Merav Sicron55c11942012-11-07 00:45:48 +00001378static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1379{
1380 u8 cos;
1381 struct bnx2x *bp = fp->bp;
1382
1383 for_each_cos_in_tx_queue(fp, cos) {
1384 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1385 unsigned pkts_compl = 0, bytes_compl = 0;
1386
1387 u16 sw_prod = txdata->tx_pkt_prod;
1388 u16 sw_cons = txdata->tx_pkt_cons;
1389
1390 while (sw_cons != sw_prod) {
1391 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1392 &pkts_compl, &bytes_compl);
1393 sw_cons++;
1394 }
1395
1396 netdev_tx_reset_queue(
1397 netdev_get_tx_queue(bp->dev,
1398 txdata->txq_index));
1399 }
1400}
1401
1402static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1403{
1404 int i;
1405
1406 for_each_tx_queue_cnic(bp, i) {
1407 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1408 }
1409}
1410
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001411static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1412{
1413 int i;
1414
Merav Sicron55c11942012-11-07 00:45:48 +00001415 for_each_eth_queue(bp, i) {
1416 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001417 }
1418}
1419
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001420static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1421{
1422 struct bnx2x *bp = fp->bp;
1423 int i;
1424
1425 /* ring wasn't allocated */
1426 if (fp->rx_buf_ring == NULL)
1427 return;
1428
1429 for (i = 0; i < NUM_RX_BD; i++) {
1430 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001431 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001432
Eric Dumazete52fcb22011-11-14 06:05:34 +00001433 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001434 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001435 dma_unmap_single(&bp->pdev->dev,
1436 dma_unmap_addr(rx_buf, mapping),
1437 fp->rx_buf_size, DMA_FROM_DEVICE);
1438
Eric Dumazete52fcb22011-11-14 06:05:34 +00001439 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001440 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001441 }
1442}
1443
Merav Sicron55c11942012-11-07 00:45:48 +00001444static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1445{
1446 int j;
1447
1448 for_each_rx_queue_cnic(bp, j) {
1449 bnx2x_free_rx_bds(&bp->fp[j]);
1450 }
1451}
1452
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001453static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1454{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001455 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001456
Merav Sicron55c11942012-11-07 00:45:48 +00001457 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001458 struct bnx2x_fastpath *fp = &bp->fp[j];
1459
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001460 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001461
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001462 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001463 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001464 }
1465}
1466
Merav Sicron55c11942012-11-07 00:45:48 +00001467void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1468{
1469 bnx2x_free_tx_skbs_cnic(bp);
1470 bnx2x_free_rx_skbs_cnic(bp);
1471}
1472
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001473void bnx2x_free_skbs(struct bnx2x *bp)
1474{
1475 bnx2x_free_tx_skbs(bp);
1476 bnx2x_free_rx_skbs(bp);
1477}
1478
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001479void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1480{
1481 /* load old values */
1482 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1483
1484 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1485 /* leave all but MAX value */
1486 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1487
1488 /* set new MAX value */
1489 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1490 & FUNC_MF_CFG_MAX_BW_MASK;
1491
1492 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1493 }
1494}
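/*
 * Sketch (not driver code) of the read-modify-write above: clear the MAX
 * bandwidth field of the MF config word, then splice in the new value.
 * The mask/shift values here are hypothetical stand-ins for
 * FUNC_MF_CFG_MAX_BW_MASK/FUNC_MF_CFG_MAX_BW_SHIFT.
 */
static unsigned int mf_cfg_set_max_bw(unsigned int mf_cfg, unsigned int value)
{
	const unsigned int mask = 0xffff0000;	/* hypothetical MAX_BW mask */
	const unsigned int shift = 16;		/* hypothetical MAX_BW shift */

	mf_cfg &= ~mask;			/* leave all but the MAX value */
	mf_cfg |= (value << shift) & mask;	/* set the new MAX value */
	return mf_cfg;
}
/* e.g. mf_cfg_set_max_bw(0x1234abcd, 100) == 0x0064abcd */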
1495
Dmitry Kravkovca924292011-06-14 01:33:08 +00001496/**
1497 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1498 *
1499 * @bp: driver handle
1500 * @nvecs: number of vectors to be released
1501 */
1502static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001503{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001504 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001505
Dmitry Kravkovca924292011-06-14 01:33:08 +00001506 if (nvecs == offset)
1507 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001508
1509 /* VFs don't have a default SB */
1510 if (IS_PF(bp)) {
1511 free_irq(bp->msix_table[offset].vector, bp->dev);
1512 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1513 bp->msix_table[offset].vector);
1514 offset++;
1515 }
Merav Sicron55c11942012-11-07 00:45:48 +00001516
1517 if (CNIC_SUPPORT(bp)) {
1518 if (nvecs == offset)
1519 return;
1520 offset++;
1521 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001522
Dmitry Kravkovca924292011-06-14 01:33:08 +00001523 for_each_eth_queue(bp, i) {
1524 if (nvecs == offset)
1525 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001526 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1527 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001528
1529 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001530 }
1531}
1532
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001533void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001534{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001535 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001536 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1537 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1538
1539 /* vfs don't have a default status block */
1540 if (IS_PF(bp))
1541 nvecs++;
1542
1543 bnx2x_free_msix_irqs(bp, nvecs);
1544 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001545 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001546 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001547}
1548
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001549int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001550{
Ariel Elior1ab44342013-01-01 05:22:23 +00001551 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001552
Ariel Elior1ab44342013-01-01 05:22:23 +00001553 /* VFs don't have a default status block */
1554 if (IS_PF(bp)) {
1555 bp->msix_table[msix_vec].entry = msix_vec;
1556 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1557 bp->msix_table[0].entry);
1558 msix_vec++;
1559 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001560
Merav Sicron55c11942012-11-07 00:45:48 +00001561 /* CNIC requires an MSI-X vector for itself */
1562 if (CNIC_SUPPORT(bp)) {
1563 bp->msix_table[msix_vec].entry = msix_vec;
1564 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1565 msix_vec, bp->msix_table[msix_vec].entry);
1566 msix_vec++;
1567 }
1568
Ariel Elior6383c0b2011-07-14 08:31:57 +00001569 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001570 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001571 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001572 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1573 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001574 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001575 }
1576
Ariel Elior1ab44342013-01-01 05:22:23 +00001577 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1578 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001579
Ariel Elior1ab44342013-01-01 05:22:23 +00001580 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001581
1582 /*
1583 * reconfigure number of tx/rx queues according to available
1584 * MSI-X vectors
1585 */
Merav Sicron55c11942012-11-07 00:45:48 +00001586 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001587 /* how many fewer vectors will we have? */
Ariel Elior1ab44342013-01-01 05:22:23 +00001588 int diff = msix_vec - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001589
Merav Sicron51c1a582012-03-18 10:33:38 +00001590 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001591
1592 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1593
1594 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001595 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1596 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001597 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001598 /*
1599 * decrease number of queues by number of unallocated entries
1600 */
Merav Sicron55c11942012-11-07 00:45:48 +00001601 bp->num_ethernet_queues -= diff;
1602 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001603
Merav Sicron51c1a582012-03-18 10:33:38 +00001604 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001605 bp->num_queues);
1606 } else if (rc > 0) {
1607 /* Get by with single vector */
1608 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1609 if (rc) {
1610 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1611 rc);
1612 goto no_msix;
1613 }
1614
1615 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1616 bp->flags |= USING_SINGLE_MSIX_FLAG;
1617
Merav Sicron55c11942012-11-07 00:45:48 +00001618 BNX2X_DEV_INFO("set number of queues to 1\n");
1619 bp->num_ethernet_queues = 1;
1620 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001621 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001622 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001623 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001624 }
1625
1626 bp->flags |= USING_MSIX_FLAG;
1627
1628 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001629
1630no_msix:
1631 /* fall to INTx if not enough memory */
1632 if (rc == -ENOMEM)
1633 bp->flags |= DISABLE_MSI_FLAG;
1634
1635 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001636}
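/*
 * Standalone sketch (not driver code) of the vector budget requested in
 * bnx2x_enable_msix() above: one slowpath vector for a PF, one for CNIC
 * when supported, and one per ETH queue. If pci_enable_msix() grants
 * fewer, the ETH queue count shrinks by the shortfall. Values below are
 * hypothetical.
 */
static int msix_eth_queues_after_grant(int is_pf, int cnic_support,
				       int num_eth_queues, int granted)
{
	int requested = (is_pf ? 1 : 0) + (cnic_support ? 1 : 0) +
			num_eth_queues;

	/* the subtraction mirrors "diff = msix_vec - rc" in the driver */
	return num_eth_queues - (requested - granted);
}
/* e.g. PF + CNIC + 8 queues = 10 requested; 8 granted -> 6 ETH queues */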
1637
1638static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1639{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001640 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001641
Ariel Eliorad5afc82013-01-01 05:22:26 +00001642 /* no default status block for vf */
1643 if (IS_PF(bp)) {
1644 rc = request_irq(bp->msix_table[offset++].vector,
1645 bnx2x_msix_sp_int, 0,
1646 bp->dev->name, bp->dev);
1647 if (rc) {
1648 BNX2X_ERR("request sp irq failed\n");
1649 return -EBUSY;
1650 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001651 }
1652
Merav Sicron55c11942012-11-07 00:45:48 +00001653 if (CNIC_SUPPORT(bp))
1654 offset++;
1655
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001656 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001657 struct bnx2x_fastpath *fp = &bp->fp[i];
1658 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1659 bp->dev->name, i);
1660
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001661 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001662 bnx2x_msix_fp_int, 0, fp->name, fp);
1663 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001664 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1665 bp->msix_table[offset].vector, rc);
1666 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001667 return -EBUSY;
1668 }
1669
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001670 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001671 }
1672
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001673 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001674 if (IS_PF(bp)) {
1675 offset = 1 + CNIC_SUPPORT(bp);
1676 netdev_info(bp->dev,
1677 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1678 bp->msix_table[0].vector,
1679 0, bp->msix_table[offset].vector,
1680 i - 1, bp->msix_table[offset + i - 1].vector);
1681 } else {
1682 offset = CNIC_SUPPORT(bp);
1683 netdev_info(bp->dev,
1684 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1685 0, bp->msix_table[offset].vector,
1686 i - 1, bp->msix_table[offset + i - 1].vector);
1687 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001688 return 0;
1689}
1690
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001691int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001692{
1693 int rc;
1694
1695 rc = pci_enable_msi(bp->pdev);
1696 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001697 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001698 return -1;
1699 }
1700 bp->flags |= USING_MSI_FLAG;
1701
1702 return 0;
1703}
1704
1705static int bnx2x_req_irq(struct bnx2x *bp)
1706{
1707 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001708 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001709
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001710 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001711 flags = 0;
1712 else
1713 flags = IRQF_SHARED;
1714
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001715 if (bp->flags & USING_MSIX_FLAG)
1716 irq = bp->msix_table[0].vector;
1717 else
1718 irq = bp->pdev->irq;
1719
1720 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001721}
1722
Eric Dumazet1191cb82012-04-27 21:39:21 +00001723static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001724{
1725 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001726 if (bp->flags & USING_MSIX_FLAG &&
1727 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001728 rc = bnx2x_req_msix_irqs(bp);
1729 if (rc)
1730 return rc;
1731 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001732 rc = bnx2x_req_irq(bp);
1733 if (rc) {
1734 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1735 return rc;
1736 }
1737 if (bp->flags & USING_MSI_FLAG) {
1738 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001739 netdev_info(bp->dev, "using MSI IRQ %d\n",
1740 bp->dev->irq);
1741 }
1742 if (bp->flags & USING_MSIX_FLAG) {
1743 bp->dev->irq = bp->msix_table[0].vector;
1744 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1745 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001746 }
1747 }
1748
1749 return 0;
1750}
1751
Merav Sicron55c11942012-11-07 00:45:48 +00001752static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1753{
1754 int i;
1755
1756 for_each_rx_queue_cnic(bp, i)
1757 napi_enable(&bnx2x_fp(bp, i, napi));
1758}
1759
Eric Dumazet1191cb82012-04-27 21:39:21 +00001760static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001761{
1762 int i;
1763
Merav Sicron55c11942012-11-07 00:45:48 +00001764 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001765 napi_enable(&bnx2x_fp(bp, i, napi));
1766}
1767
Merav Sicron55c11942012-11-07 00:45:48 +00001768static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1769{
1770 int i;
1771
1772 for_each_rx_queue_cnic(bp, i)
1773 napi_disable(&bnx2x_fp(bp, i, napi));
1774}
1775
Eric Dumazet1191cb82012-04-27 21:39:21 +00001776static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001777{
1778 int i;
1779
Merav Sicron55c11942012-11-07 00:45:48 +00001780 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001781 napi_disable(&bnx2x_fp(bp, i, napi));
1782}
1783
1784void bnx2x_netif_start(struct bnx2x *bp)
1785{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001786 if (netif_running(bp->dev)) {
1787 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001788 if (CNIC_LOADED(bp))
1789 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001790 bnx2x_int_enable(bp);
1791 if (bp->state == BNX2X_STATE_OPEN)
1792 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001793 }
1794}
1795
1796void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1797{
1798 bnx2x_int_disable_sync(bp, disable_hw);
1799 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001800 if (CNIC_LOADED(bp))
1801 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001802}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001803
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001804u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1805{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001806 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001807
Merav Sicron55c11942012-11-07 00:45:48 +00001808 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001809 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1810 u16 ether_type = ntohs(hdr->h_proto);
1811
1812 /* Skip VLAN tag if present */
1813 if (ether_type == ETH_P_8021Q) {
1814 struct vlan_ethhdr *vhdr =
1815 (struct vlan_ethhdr *)skb->data;
1816
1817 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1818 }
1819
1820 /* If ethertype is FCoE or FIP - use FCoE ring */
1821 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001822 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001823 }
Merav Sicron55c11942012-11-07 00:45:48 +00001824
David S. Miller823dcd22011-08-20 10:39:12 -07001825 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001826 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001827}
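/*
 * Sketch (not driver code) of the ethertype probe in bnx2x_select_queue()
 * above: when the outer type is 802.1Q, the encapsulated type sits after
 * the 4-byte VLAN header. Offsets follow the standard Ethernet layout;
 * ETH_P_8021Q_DEMO is a local stand-in for the kernel's ETH_P_8021Q.
 */
#define ETH_P_8021Q_DEMO	0x8100
static unsigned short frame_ether_type(const unsigned char *frame)
{
	/* bytes 12..13 hold the outer ethertype (network byte order) */
	unsigned short type = (frame[12] << 8) | frame[13];

	if (type == ETH_P_8021Q_DEMO)
		/* skip the 4-byte VLAN tag: inner type at bytes 16..17 */
		type = (frame[16] << 8) | frame[17];
	return type;
}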
1828
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001829void bnx2x_set_num_queues(struct bnx2x *bp)
1830{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001831 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001832 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001833
Barak Witkowskia3348722012-04-23 03:04:46 +00001834 /* override in STORAGE SD modes */
1835 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001836 bp->num_ethernet_queues = 1;
1837
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001838 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001839 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1840 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001841
1842 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001843}
1844
David S. Miller823dcd22011-08-20 10:39:12 -07001845/**
1846 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1847 *
1848 * @bp: Driver handle
1849 *
1850 * We currently support at most 16 Tx queues for each CoS, so we
1851 * allocate a multiple of 16 for ETH L2 rings, according to the value of
1852 * bp->max_cos.
1853 *
1854 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1855 * index after all ETH L2 indices.
1856 *
1857 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1858 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1859 * 16..31,...) with indices that are not coupled with any real Tx queue.
1860 *
1861 * The proper configuration of skb->queue_mapping is handled by
1862 * bnx2x_select_queue() and __skb_tx_hash().
1863 *
1864 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1865 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1866 */
Merav Sicron55c11942012-11-07 00:45:48 +00001867static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001868{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001869 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001870
Merav Sicron65565882012-06-19 07:48:26 +00001871 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001872 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001873
1874/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001875 if (include_cnic && !NO_FCOE(bp)) {
1876 rx++;
1877 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001878 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001879
1880 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1881 if (rc) {
1882 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1883 return rc;
1884 }
1885 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1886 if (rc) {
1887 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1888 return rc;
1889 }
1890
Merav Sicron51c1a582012-03-18 10:33:38 +00001891 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001892 tx, rx);
1893
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001894 return rc;
1895}
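/*
 * Sketch (not driver code) of the Tx index layout documented above, with
 * hypothetical numbers. The netdev is told about num_eth * max_cos real
 * Tx queues, and CoS 'c' of ETH queue 'q' lands at index c * num_eth + q
 * (matching the txdata_ptr setup in bnx2x_bz_fp() further below).
 */
static int tx_index_for(int cos, int q, int num_eth)
{
	return cos * num_eth + q;
}
/* e.g. num_eth=8, max_cos=3: 24 real Tx queues; cos 2, queue 5 -> index 21 */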
1896
Eric Dumazet1191cb82012-04-27 21:39:21 +00001897static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001898{
1899 int i;
1900
1901 for_each_queue(bp, i) {
1902 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001903 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001904
1905 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1906 if (IS_FCOE_IDX(i))
1907 /*
1908 * Although no IP frames are expected to arrive on
1909 * this ring, we still want to add an
1910 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1911 * overrun attack.
1912 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001913 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001914 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001915 mtu = bp->dev->mtu;
1916 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1917 IP_HEADER_ALIGNMENT_PADDING +
1918 ETH_OVREHEAD +
1919 mtu +
1920 BNX2X_FW_RX_ALIGN_END;
1921 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
Eric Dumazetd46d1322012-12-10 12:16:06 +00001922 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1923 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1924 else
1925 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001926 }
1927}
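/*
 * Worked example (not driver code) of the sizing above, with hypothetical
 * constants: rx_buf_size is the alignment head/tail plus IP padding, the
 * Ethernet overhead and the MTU. If that plus NET_SKB_PAD fits in a page,
 * the page-fragment allocator is used; otherwise rx_frag_size stays 0.
 */
static int rx_frag_size_for(int mtu)
{
	const int align_start = 64, align_end = 64;	/* hypothetical */
	const int ip_pad = 2, eth_overhead = 18;	/* hypothetical */
	const int net_skb_pad = 64, page_size = 4096;
	int rx_buf_size = align_start + ip_pad + eth_overhead + mtu +
			  align_end;

	if (rx_buf_size + net_skb_pad <= page_size)
		return rx_buf_size + net_skb_pad;
	return 0;			/* fall back to kmalloc'ed buffers */
}
/* e.g. mtu=1500 -> rx_buf_size=1648, rx_frag_size=1712; mtu=9000 -> 0 */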
1928
Eric Dumazet1191cb82012-04-27 21:39:21 +00001929static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001930{
1931 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001932 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1933
Dmitry Kravkov96305232012-04-03 18:41:30 +00001934 /* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001935 * enabled
1936 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001937 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1938 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001939 bp->fp->cl_id +
1940 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001941
1942 /*
1943 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1944 * per-port, so if explicit configuration is needed, do it only
1945 * for a PMF.
1946 *
1947 * For 57712 and newer on the other hand it's a per-function
1948 * configuration.
1949 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001950 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001951}
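/*
 * Sketch (not driver code) of the default indirection-table fill above:
 * entries round-robin over the ETH queues, offset by the first client ID
 * (ethtool_rxfh_indir_default() reduces to index % n_rx_rings). The base
 * client ID below is a hypothetical example value.
 */
static unsigned char rss_ind_entry(int index, int num_eth_queues,
				   unsigned char base_cl_id)
{
	return base_cl_id + index % num_eth_queues;
}
/* e.g. base_cl_id=16, 4 queues: entries 16, 17, 18, 19, 16, 17, ... */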
1952
Dmitry Kravkov96305232012-04-03 18:41:30 +00001953int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001954 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001955{
Yuval Mintz3b603062012-03-18 10:33:39 +00001956 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001957
1958 /* Although RSS is meaningless when there is a single HW queue we
1959 * still need it enabled in order to have HW Rx hash generated.
1960 *
1961 * if (!is_eth_multi(bp))
1962 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1963 */
1964
Dmitry Kravkov96305232012-04-03 18:41:30 +00001965 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001966
1967 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1968
Dmitry Kravkov96305232012-04-03 18:41:30 +00001969 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001970
Dmitry Kravkov96305232012-04-03 18:41:30 +00001971 /* RSS configuration */
1972 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1973 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1974 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1975 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001976 if (rss_obj->udp_rss_v4)
1977 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1978 if (rss_obj->udp_rss_v6)
1979 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001980
Dmitry Kravkov96305232012-04-03 18:41:30 +00001981 /* Hash bits */
1982 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001983
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001984 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001985
Dmitry Kravkov96305232012-04-03 18:41:30 +00001986 if (config_hash) {
1987 /* RSS keys */
Akinobu Mita8376d0b2012-12-17 16:04:28 -08001988 prandom_bytes(params.rss_key, sizeof(params.rss_key));
Dmitry Kravkov96305232012-04-03 18:41:30 +00001989 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001990 }
1991
1992 return bnx2x_config_rss(bp, &params);
1993}
1994
Eric Dumazet1191cb82012-04-27 21:39:21 +00001995static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001996{
Yuval Mintz3b603062012-03-18 10:33:39 +00001997 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001998
1999 /* Prepare parameters for function state transitions */
2000 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2001
2002 func_params.f_obj = &bp->func_obj;
2003 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2004
2005 func_params.params.hw_init.load_phase = load_code;
2006
2007 return bnx2x_func_state_change(bp, &func_params);
2008}
2009
2010/*
2011 * Cleans the objects that have internal lists, without sending
2012 * ramrods. Should be run when interrupts are disabled.
2013 */
2014static void bnx2x_squeeze_objects(struct bnx2x *bp)
2015{
2016 int rc;
2017 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002018 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002019 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002020
2021 /***************** Cleanup MACs' object first *************************/
2022
2023 /* Wait for completion of requested commands */
2024 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2025 /* Perform a dry cleanup */
2026 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2027
2028 /* Clean ETH primary MAC */
2029 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002030 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002031 &ramrod_flags);
2032 if (rc != 0)
2033 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2034
2035 /* Cleanup UC list */
2036 vlan_mac_flags = 0;
2037 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2038 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2039 &ramrod_flags);
2040 if (rc != 0)
2041 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2042
2043 /***************** Now clean mcast object *****************************/
2044 rparam.mcast_obj = &bp->mcast_obj;
2045 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2046
2047 /* Add a DEL command... */
2048 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2049 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002050 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2051 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002052
2053 /* ...and wait until all pending commands are cleared */
2054 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2055 while (rc != 0) {
2056 if (rc < 0) {
2057 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2058 rc);
2059 return;
2060 }
2061
2062 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2063 }
2064}
2065
2066#ifndef BNX2X_STOP_ON_ERROR
2067#define LOAD_ERROR_EXIT(bp, label) \
2068 do { \
2069 (bp)->state = BNX2X_STATE_ERROR; \
2070 goto label; \
2071 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002072
2073#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2074 do { \
2075 bp->cnic_loaded = false; \
2076 goto label; \
2077 } while (0)
2078#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002079#define LOAD_ERROR_EXIT(bp, label) \
2080 do { \
2081 (bp)->state = BNX2X_STATE_ERROR; \
2082 (bp)->panic = 1; \
2083 return -EBUSY; \
2084 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002085#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2086 do { \
2087 bp->cnic_loaded = false; \
2088 (bp)->panic = 1; \
2089 return -EBUSY; \
2090 } while (0)
2091#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002092
Ariel Eliorad5afc82013-01-01 05:22:26 +00002093static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002094{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002095 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2096 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2097 return;
2098}
Yuval Mintz452427b2012-03-26 20:47:07 +00002099
Ariel Eliorad5afc82013-01-01 05:22:26 +00002100static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2101{
Ariel Elior8db573b2013-01-01 05:22:37 +00002102 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002103 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002104
Ariel Eliorad5afc82013-01-01 05:22:26 +00002105 /* number of queues for statistics is number of eth queues + FCoE */
2106 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002107
Ariel Eliorad5afc82013-01-01 05:22:26 +00002108 /* Total number of FW statistics requests =
2109 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2110 * and fcoe l2 queue) stats + num of queues (which includes another 1
2111 * for fcoe l2 queue if applicable)
2112 */
2113 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2114
Ariel Elior8db573b2013-01-01 05:22:37 +00002115 /* vf stats appear in the request list, but their data is allocated by
2116 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2117 * it is used to determine where to place the vf stats queries in the
2118 * request struct
2119 */
2120 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002121 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002122
Ariel Eliorad5afc82013-01-01 05:22:26 +00002123 /* Request is built from stats_query_header and an array of
2124 * stats_query_cmd_group entries, each of which contains
2125 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2126 * configured in the stats_query_header.
2127 */
2128 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002129 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2130 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002131 1 : 0));
2132
Ariel Elior8db573b2013-01-01 05:22:37 +00002133 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2134 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002135 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2136 num_groups * sizeof(struct stats_query_cmd_group);
2137
2138 /* Data for statistics requests + stats_counter
2139 * stats_counter holds per-STORM counters that are incremented
2140 * when STORM has finished with the current request.
2141 * memory for FCoE offloaded statistics are counted anyway,
2142 * even if they will not be sent.
2143 * VF stats are not accounted for here as the data of VF stats is stored
2144 * in memory allocated by the VF, not here.
2145 */
2146 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2147 sizeof(struct per_pf_stats) +
2148 sizeof(struct fcoe_statistics_params) +
2149 sizeof(struct per_queue_stats) * num_queue_stats +
2150 sizeof(struct stats_counter);
2151
2152 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2153 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2154
2155 /* Set shortcuts */
2156 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2157 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2158 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2159 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2160 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2161 bp->fw_stats_req_sz;
2162
2163 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2164 U64_HI(bp->fw_stats_req_mapping),
2165 U64_LO(bp->fw_stats_req_mapping));
2166 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2167 U64_HI(bp->fw_stats_data_mapping),
2168 U64_LO(bp->fw_stats_data_mapping));
2169 return 0;
2170
2171alloc_mem_err:
2172 bnx2x_free_fw_stats_mem(bp);
2173 BNX2X_ERR("Can't allocate FW stats memory\n");
2174 return -ENOMEM;
2175}
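/*
 * The group count above is a plain ceiling division; a standalone sketch
 * (not driver code) with made-up numbers. cmds_per_group stands in for
 * STATS_QUERY_CMD_COUNT.
 */
static int stats_query_groups(int fw_stats_num, int vf_headroom,
			      int cmds_per_group)
{
	int total = fw_stats_num + vf_headroom;

	return total / cmds_per_group + (total % cmds_per_group ? 1 : 0);
}
/* e.g. (2 + 1 + 9) queries + 64 of VF headroom, 16 per group -> 5 groups */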
2176
2177/* send load request to mcp and analyze response */
2178static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2179{
2180 /* init fw_seq */
2181 bp->fw_seq =
2182 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2183 DRV_MSG_SEQ_NUMBER_MASK);
2184 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2185
2186 /* Get current FW pulse sequence */
2187 bp->fw_drv_pulse_wr_seq =
2188 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2189 DRV_PULSE_SEQ_MASK);
2190 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2191
2192 /* load request */
2193 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2194 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2195
2196 /* if mcp fails to respond we must abort */
2197 if (!(*load_code)) {
2198 BNX2X_ERR("MCP response failure, aborting\n");
2199 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002200 }
2201
Ariel Eliorad5afc82013-01-01 05:22:26 +00002202 /* If mcp refused (e.g. other port is in diagnostic mode) we
2203 * must abort
2204 */
2205 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2206 BNX2X_ERR("MCP refused load request, aborting\n");
2207 return -EBUSY;
2208 }
2209 return 0;
2210}
2211
2212/* check whether another PF has already loaded FW to the chip. In
2213 * virtualized environments a PF from another VM may have already
2214 * initialized the device, including loading FW
2215 */
2216int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2217{
2218 /* is another pf loaded on this engine? */
2219 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2220 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2221 /* build my FW version dword */
2222 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2223 (BCM_5710_FW_MINOR_VERSION << 8) +
2224 (BCM_5710_FW_REVISION_VERSION << 16) +
2225 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2226
2227 /* read loaded FW from chip */
2228 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2229
2230 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2231 loaded_fw, my_fw);
2232
2233 /* abort nic load if version mismatch */
2234 if (my_fw != loaded_fw) {
2235 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2236 loaded_fw, my_fw);
2237 return -EBUSY;
2238 }
2239 }
2240 return 0;
2241}
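/*
 * Sketch (not driver code) of the version-dword packing above: one byte
 * per field, major version in the low byte, engineering version in the
 * top byte. The version numbers are made-up examples.
 */
static unsigned int fw_version_dword(unsigned int major, unsigned int minor,
				     unsigned int rev, unsigned int eng)
{
	return major + (minor << 8) + (rev << 16) + (eng << 24);
}
/* e.g. fw_version_dword(7, 8, 2, 0) == 0x00020807 */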
2242
2243/* returns the "mcp load_code" according to global load_count array */
2244static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2245{
2246 int path = BP_PATH(bp);
2247
2248 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2249 path, load_count[path][0], load_count[path][1],
2250 load_count[path][2]);
2251 load_count[path][0]++;
2252 load_count[path][1 + port]++;
2253 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2254 path, load_count[path][0], load_count[path][1],
2255 load_count[path][2]);
2256 if (load_count[path][0] == 1)
2257 return FW_MSG_CODE_DRV_LOAD_COMMON;
2258 else if (load_count[path][1 + port] == 1)
2259 return FW_MSG_CODE_DRV_LOAD_PORT;
2260 else
2261 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2262}
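/*
 * Sketch (not driver code) of the no-MCP accounting above: the first
 * function to load on a path gets COMMON (full chip init), the first on
 * each port gets PORT, everyone else gets FUNCTION. counts[0] tracks the
 * path, counts[1 + port] the port, as load_count does above.
 */
static const char *no_mcp_load_code_demo(int *counts, int port)
{
	counts[0]++;
	counts[1 + port]++;
	if (counts[0] == 1)
		return "LOAD_COMMON";
	if (counts[1 + port] == 1)
		return "LOAD_PORT";
	return "LOAD_FUNCTION";
}
/* from {0, 0, 0}: port 0 -> COMMON, port 1 -> PORT, port 0 -> FUNCTION */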
2263
2264/* mark PMF if applicable */
2265static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2266{
2267 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2268 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2269 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2270 bp->port.pmf = 1;
2271 /* We need the barrier to ensure the ordering between the
2272 * writing to bp->port.pmf here and reading it from the
2273 * bnx2x_periodic_task().
2274 */
2275 smp_mb();
2276 } else {
2277 bp->port.pmf = 0;
2278 }
2279
2280 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2281}
2282
2283static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2284{
2285 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2286 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2287 (bp->common.shmem2_base)) {
2288 if (SHMEM2_HAS(bp, dcc_support))
2289 SHMEM2_WR(bp, dcc_support,
2290 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2291 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2292 if (SHMEM2_HAS(bp, afex_driver_support))
2293 SHMEM2_WR(bp, afex_driver_support,
2294 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2295 }
2296
2297 /* Set AFEX default VLAN tag to an invalid value */
2298 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002299}
2300
Eric Dumazet1191cb82012-04-27 21:39:21 +00002301/**
2302 * bnx2x_bz_fp - zero content of the fastpath structure.
2303 *
2304 * @bp: driver handle
2305 * @index: fastpath index to be zeroed
2306 *
2307 * Makes sure the contents of the bp->fp[index].napi is kept
2308 * intact.
2309 */
2310static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2311{
2312 struct bnx2x_fastpath *fp = &bp->fp[index];
Barak Witkowski15192a82012-06-19 07:48:28 +00002313 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2314
Merav Sicron65565882012-06-19 07:48:26 +00002315 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002316 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002317 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002318 /* bzero bnx2x_fastpath contents */
Barak Witkowski15192a82012-06-19 07:48:28 +00002319 if (bp->stats_init) {
2320 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002321 memset(fp, 0, sizeof(*fp));
Barak Witkowski15192a82012-06-19 07:48:28 +00002322 } else {
Eric Dumazet1191cb82012-04-27 21:39:21 +00002323 /* Keep Queue statistics */
2324 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2325 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2326
2327 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2328 GFP_KERNEL);
2329 if (tmp_eth_q_stats)
Barak Witkowski15192a82012-06-19 07:48:28 +00002330 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
Eric Dumazet1191cb82012-04-27 21:39:21 +00002331 sizeof(struct bnx2x_eth_q_stats));
2332
2333 tmp_eth_q_stats_old =
2334 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2335 GFP_KERNEL);
2336 if (tmp_eth_q_stats_old)
Barak Witkowski15192a82012-06-19 07:48:28 +00002337 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00002338 sizeof(struct bnx2x_eth_q_stats_old));
2339
Barak Witkowski15192a82012-06-19 07:48:28 +00002340 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002341 memset(fp, 0, sizeof(*fp));
2342
2343 if (tmp_eth_q_stats) {
Barak Witkowski15192a82012-06-19 07:48:28 +00002344 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2345 sizeof(struct bnx2x_eth_q_stats));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002346 kfree(tmp_eth_q_stats);
2347 }
2348
2349 if (tmp_eth_q_stats_old) {
Barak Witkowski15192a82012-06-19 07:48:28 +00002350 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00002351 sizeof(struct bnx2x_eth_q_stats_old));
2352 kfree(tmp_eth_q_stats_old);
2353 }
2354
2355 }
2356
2357 /* Restore the NAPI object as it has been already initialized */
2358 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002359 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002360 fp->bp = bp;
2361 fp->index = index;
2362 if (IS_ETH_FP(fp))
2363 fp->max_cos = bp->max_cos;
2364 else
2365 /* Special queues support only one CoS */
2366 fp->max_cos = 1;
2367
Merav Sicron65565882012-06-19 07:48:26 +00002368 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002369 if (IS_FCOE_FP(fp))
2370 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002371 if (IS_ETH_FP(fp))
2372 for_each_cos_in_tx_queue(fp, cos)
2373 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2374 BNX2X_NUM_ETH_QUEUES(bp) + index];
2375
Eric Dumazet1191cb82012-04-27 21:39:21 +00002376 /*
2377 * set the tpa flag for each queue. The tpa flag determines the queue's
2378 * minimal size, so it must be set prior to queue memory allocation
2379 */
2380 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2381 (bp->flags & GRO_ENABLE_FLAG &&
2382 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2383 if (bp->flags & TPA_ENABLE_FLAG)
2384 fp->mode = TPA_MODE_LRO;
2385 else if (bp->flags & GRO_ENABLE_FLAG)
2386 fp->mode = TPA_MODE_GRO;
2387
Eric Dumazet1191cb82012-04-27 21:39:21 +00002388 /* We don't want TPA on an FCoE L2 ring */
2389 if (IS_FCOE_FP(fp))
2390 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002391}
2392
2393int bnx2x_load_cnic(struct bnx2x *bp)
2394{
2395 int i, rc, port = BP_PORT(bp);
2396
2397 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2398
2399 mutex_init(&bp->cnic_mutex);
2400
Ariel Eliorad5afc82013-01-01 05:22:26 +00002401 if (IS_PF(bp)) {
2402 rc = bnx2x_alloc_mem_cnic(bp);
2403 if (rc) {
2404 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2405 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2406 }
Merav Sicron55c11942012-11-07 00:45:48 +00002407 }
2408
2409 rc = bnx2x_alloc_fp_mem_cnic(bp);
2410 if (rc) {
2411 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2412 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2413 }
2414
2415 /* Update the number of queues with the cnic queues */
2416 rc = bnx2x_set_real_num_queues(bp, 1);
2417 if (rc) {
2418 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2419 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2420 }
2421
2422 /* Add all CNIC NAPI objects */
2423 bnx2x_add_all_napi_cnic(bp);
2424 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2425 bnx2x_napi_enable_cnic(bp);
2426
2427 rc = bnx2x_init_hw_func_cnic(bp);
2428 if (rc)
2429 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2430
2431 bnx2x_nic_init_cnic(bp);
2432
Ariel Eliorad5afc82013-01-01 05:22:26 +00002433 if (IS_PF(bp)) {
2434 /* Enable Timer scan */
2435 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002436
Ariel Eliorad5afc82013-01-01 05:22:26 +00002437 /* setup cnic queues */
2438 for_each_cnic_queue(bp, i) {
2439 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2440 if (rc) {
2441 BNX2X_ERR("Queue setup failed\n");
2442 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2443 }
Merav Sicron55c11942012-11-07 00:45:48 +00002444 }
2445 }
2446
2447 /* Initialize Rx filter. */
2448 netif_addr_lock_bh(bp->dev);
2449 bnx2x_set_rx_mode(bp->dev);
2450 netif_addr_unlock_bh(bp->dev);
2451
2452 /* re-read iscsi info */
2453 bnx2x_get_iscsi_info(bp);
2454 bnx2x_setup_cnic_irq_info(bp);
2455 bnx2x_setup_cnic_info(bp);
2456 bp->cnic_loaded = true;
2457 if (bp->state == BNX2X_STATE_OPEN)
2458 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2459
2460
2461 DP(NETIF_MSG_IFUP, "CNIC-related load finished successfully\n");
2462
2463 return 0;
2464
2465#ifndef BNX2X_STOP_ON_ERROR
2466load_error_cnic2:
2467 /* Disable Timer scan */
2468 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2469
2470load_error_cnic1:
2471 bnx2x_napi_disable_cnic(bp);
2472 /* Update the number of queues without the cnic queues */
2473 rc = bnx2x_set_real_num_queues(bp, 0);
2474 if (rc)
2475 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2476load_error_cnic0:
2477 BNX2X_ERR("CNIC-related load failed\n");
2478 bnx2x_free_fp_mem_cnic(bp);
2479 bnx2x_free_mem_cnic(bp);
2480 return rc;
2481#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002482}
2483
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002484/* must be called with rtnl_lock */
2485int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2486{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002487 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002488 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002489
Merav Sicron55c11942012-11-07 00:45:48 +00002490 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2491 DP(NETIF_MSG_IFUP,
2492 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2493
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002494#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002495 if (unlikely(bp->panic)) {
2496 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002497 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002498 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002499#endif
2500
2501 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2502
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002503 /* Set the initial link reported state to link down */
2504 bnx2x_acquire_phy_lock(bp);
2505 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2506 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2507 &bp->last_reported_link.link_report_flags);
2508 bnx2x_release_phy_lock(bp);
2509
Ariel Eliorad5afc82013-01-01 05:22:26 +00002510 if (IS_PF(bp))
2511 /* must be called before memory allocation and HW init */
2512 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002513
Ariel Elior6383c0b2011-07-14 08:31:57 +00002514 /*
2515 * Zero the fastpath structures, preserving invariants that are set only
2516 * once: napi (allocated once), fp index, max_cos, and the bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002517 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002518 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002519 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002520 for_each_queue(bp, i)
2521 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002522 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2523 bp->num_cnic_queues) *
2524 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002525
Merav Sicron55c11942012-11-07 00:45:48 +00002526 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002527
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002528 /* Set the receive queues buffer size */
2529 bnx2x_set_rx_buf_size(bp);
2530
Ariel Eliorad5afc82013-01-01 05:22:26 +00002531 if (IS_PF(bp)) {
2532 rc = bnx2x_alloc_mem(bp);
2533 if (rc) {
2534 BNX2X_ERR("Unable to allocate bp memory\n");
2535 return rc;
2536 }
2537 }
2538
2539 /* Allocate memory for FW statistics */
2540 if (bnx2x_alloc_fw_stats_mem(bp))
2541 LOAD_ERROR_EXIT(bp, load_error0);
2542
2543 /* needs to be done after alloc mem, since it's self-adjusting to the
2544 * amount of memory available for RSS queues
2545 */
2546 rc = bnx2x_alloc_fp_mem(bp);
2547 if (rc) {
2548 BNX2X_ERR("Unable to allocate memory for fps\n");
2549 LOAD_ERROR_EXIT(bp, load_error0);
2550 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002551
Ariel Elior8d9ac292013-01-01 05:22:27 +00002552 /* request pf to initialize status blocks */
2553 if (IS_VF(bp)) {
2554 rc = bnx2x_vfpf_init(bp);
2555 if (rc)
2556 LOAD_ERROR_EXIT(bp, load_error0);
2557 }
2558
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002559 /* Since bnx2x_alloc_mem() may update
2560 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002561 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002562 */
Merav Sicron55c11942012-11-07 00:45:48 +00002563 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002564 if (rc) {
2565 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002566 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002567 }
2568
Ariel Elior6383c0b2011-07-14 08:31:57 +00002569 /* configure multi cos mappings in kernel.
2570 * this configuration may be overridden by a multi-class queue discipline
2571 * or by a DCBX negotiation result.
2572 */
2573 bnx2x_setup_tc(bp->dev, bp->max_cos);
2574
Merav Sicron26614ba2012-08-27 03:26:19 +00002575 /* Add all NAPI objects */
2576 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002577 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002578 bnx2x_napi_enable(bp);
2579
Ariel Eliorad5afc82013-01-01 05:22:26 +00002580 if (IS_PF(bp)) {
2581 /* set pf load just before approaching the MCP */
2582 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002583
Ariel Eliorad5afc82013-01-01 05:22:26 +00002584 /* if mcp exists send load request and analyze response */
2585 if (!BP_NOMCP(bp)) {
2586 /* attempt to load pf */
2587 rc = bnx2x_nic_load_request(bp, &load_code);
2588 if (rc)
2589 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002590
Ariel Eliorad5afc82013-01-01 05:22:26 +00002591 /* what did mcp say? */
2592 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2593 if (rc) {
2594 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002595 LOAD_ERROR_EXIT(bp, load_error2);
2596 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002597 } else {
2598 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002599 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002600
Ariel Eliorad5afc82013-01-01 05:22:26 +00002601 /* mark pmf if applicable */
2602 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002603
Ariel Eliorad5afc82013-01-01 05:22:26 +00002604 /* Init Function state controlling object */
2605 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002606
Ariel Eliorad5afc82013-01-01 05:22:26 +00002607 /* Initialize HW */
2608 rc = bnx2x_init_hw(bp, load_code);
2609 if (rc) {
2610 BNX2X_ERR("HW init failed, aborting\n");
2611 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2612 LOAD_ERROR_EXIT(bp, load_error2);
2613 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002614 }
2615
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002616 /* Connect to IRQs */
2617 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002618 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002619 BNX2X_ERR("setup irqs failed\n");
2620 if (IS_PF(bp))
2621 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002622 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002623 }
2624
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002625 /* Setup NIC internals and enable interrupts */
2626 bnx2x_nic_init(bp, load_code);
2627
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002628 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002629 if (IS_PF(bp)) {
2630 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002631 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002632
Ariel Eliorad5afc82013-01-01 05:22:26 +00002633 /* Set AFEX default VLAN tag to an invalid value */
2634 bp->afex_def_vlan_tag = -1;
2635 bnx2x_nic_load_afex_dcc(bp, load_code);
2636 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2637 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002638 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002639 BNX2X_ERR("Function start failed!\n");
2640 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2641
Merav Sicron55c11942012-11-07 00:45:48 +00002642 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002643 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002644
Ariel Eliorad5afc82013-01-01 05:22:26 +00002645 /* Send LOAD_DONE command to MCP */
2646 if (!BP_NOMCP(bp)) {
2647 load_code = bnx2x_fw_command(bp,
2648 DRV_MSG_CODE_LOAD_DONE, 0);
2649 if (!load_code) {
2650 BNX2X_ERR("MCP response failure, aborting\n");
2651 rc = -EBUSY;
2652 LOAD_ERROR_EXIT(bp, load_error3);
2653 }
2654 }
2655
2656 /* setup the leading queue */
2657 rc = bnx2x_setup_leading(bp);
2658 if (rc) {
2659 BNX2X_ERR("Setup leading failed!\n");
2660 LOAD_ERROR_EXIT(bp, load_error3);
2661 }
2662
2663 /* set up the rest of the queues */
2664 for_each_nondefault_eth_queue(bp, i) {
2665 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2666 if (rc) {
2667 BNX2X_ERR("Queue setup failed\n");
2668 LOAD_ERROR_EXIT(bp, load_error3);
2669 }
2670 }
2671
2672 /* setup rss */
2673 rc = bnx2x_init_rss_pf(bp);
2674 if (rc) {
2675 BNX2X_ERR("PF RSS init failed\n");
2676 LOAD_ERROR_EXIT(bp, load_error3);
2677 }
Ariel Elior8d9ac292013-01-01 05:22:27 +00002678
2679 } else { /* vf */
2680 for_each_eth_queue(bp, i) {
2681 rc = bnx2x_vfpf_setup_q(bp, i);
2682 if (rc) {
2683 BNX2X_ERR("Queue setup failed\n");
2684 LOAD_ERROR_EXIT(bp, load_error3);
2685 }
2686 }
Merav Sicron51c1a582012-03-18 10:33:38 +00002687 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002688
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002689 /* Now that clients are configured we are ready to work */
2690 bp->state = BNX2X_STATE_OPEN;
2691
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002692 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002693 if (IS_PF(bp))
2694 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002695 else /* vf */
2696 rc = bnx2x_vfpf_set_mac(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002697 if (rc) {
2698 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002699 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002700 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002701
Ariel Eliorad5afc82013-01-01 05:22:26 +00002702 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002703 bnx2x_update_max_mf_config(bp, bp->pending_max);
2704 bp->pending_max = 0;
2705 }
2706
Ariel Eliorad5afc82013-01-01 05:22:26 +00002707 if (bp->port.pmf) {
2708 rc = bnx2x_initial_phy_init(bp, load_mode);
2709 if (rc)
2710 LOAD_ERROR_EXIT(bp, load_error3);
2711 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002712 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002713
2714 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002715
2716 /* Initialize Rx filter. */
2717 netif_addr_lock_bh(bp->dev);
2718 bnx2x_set_rx_mode(bp->dev);
2719 netif_addr_unlock_bh(bp->dev);
2720
2721 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002722 switch (load_mode) {
2723 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002724 /* Tx queues should only be re-enabled */
2725 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002726 break;
2727
2728 case LOAD_OPEN:
2729 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002730 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002731 break;
2732
2733 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002734 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002735 bp->state = BNX2X_STATE_DIAG;
2736 break;
2737
2738 default:
2739 break;
2740 }
2741
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002742 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002743 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002744 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002745 bnx2x__link_status_update(bp);
2746
2747 /* start the timer */
2748 mod_timer(&bp->timer, jiffies + bp->current_interval);
2749
Merav Sicron55c11942012-11-07 00:45:48 +00002750 if (CNIC_ENABLED(bp))
2751 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002752
Ariel Eliorad5afc82013-01-01 05:22:26 +00002753 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2754 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002755 u32 val;
2756 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2757 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2758 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2759 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2760 }
2761
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002762 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002763 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002764 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002765 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002766 return -EBUSY;
2767 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002768
Barak Witkowski98768792012-06-19 07:48:31 +00002769 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2770 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2771 bnx2x_dcbx_init(bp, false);
2772
Merav Sicron55c11942012-11-07 00:45:48 +00002773 DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2774
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002775 return 0;
2776
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002777#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002778load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002779 if (IS_PF(bp)) {
2780 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002781
Ariel Eliorad5afc82013-01-01 05:22:26 +00002782 /* Clean queueable objects */
2783 bnx2x_squeeze_objects(bp);
2784 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002785
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002786 /* Free SKBs, SGEs, TPA pool and driver internals */
2787 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002788 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002789 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002790
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002791 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002792 bnx2x_free_irq(bp);
2793load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002794 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002795 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2796 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2797 }
2798
2799 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002800load_error1:
2801 bnx2x_napi_disable(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002802
Ariel Elior889b9af2012-01-26 06:01:51 +00002803 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002804 if (IS_PF(bp))
2805 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002806load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002807 bnx2x_free_fp_mem(bp);
2808 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002809 bnx2x_free_mem(bp);
2810
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002811 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002812#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002813}
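
/* Editor's note (illustrative, not from this file): the LOAD_ERROR_EXIT()
 * macro used throughout the load path above comes from bnx2x_cmn.h; when
 * BNX2X_STOP_ON_ERROR is not defined it roughly amounts to recording the
 * error state and jumping to the matching cleanup label:
 *
 *	#define LOAD_ERROR_EXIT(bp, label)			\
 *		do {						\
 *			(bp)->state = BNX2X_STATE_ERROR;	\
 *			goto label;				\
 *		} while (0)
 *
 * which is why the load_error1..3 labels above unwind progressively more
 * initialization. The exact definition may differ.
 */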
2814
Ariel Eliorad5afc82013-01-01 05:22:26 +00002815static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2816{
2817 u8 rc = 0, cos, i;
2818
2819 /* Wait until tx fastpath tasks complete */
2820 for_each_tx_queue(bp, i) {
2821 struct bnx2x_fastpath *fp = &bp->fp[i];
2822
2823 for_each_cos_in_tx_queue(fp, cos)
2824 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2825 if (rc)
2826 return rc;
2827 }
2828 return 0;
2829}
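
/* Editor's sketch of what "cleaning" a Tx queue means here (hypothetical
 * helper, not part of the driver): poll until the queue's consumer index
 * catches up with the producer, giving up after a bounded wait. The real
 * per-cos work is done by bnx2x_clean_tx_queue().
 */
static int example_wait_tx_idle(struct bnx2x_fp_txdata *txdata)
{
	int budget = 5000;	/* assumed bound, in ~1ms steps */

	while (txdata->tx_pkt_prod != txdata->tx_pkt_cons) {
		if (!budget--)
			return -EBUSY;	/* consumer never caught up */
		usleep_range(1000, 2000);
	}
	return 0;
}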
2830
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002831/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002832int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002833{
2834 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002835 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002836
Merav Sicron55c11942012-11-07 00:45:48 +00002837 DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2838
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002839 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002840 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002841 u32 val;
2842 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2843 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2844 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2845 }
2846
Ariel Eliorad5afc82013-01-01 05:22:26 +00002847 if (IS_PF(bp) &&
2848 (bp->state == BNX2X_STATE_CLOSED ||
2849 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002850 /* We can get here if the driver has been unloaded
2851 * during parity error recovery and is either waiting for a
2852 * leader to complete or for other functions to unload and
2853 * then ifdown has been issued. In this case we want to
2854 * unload and let other functions complete a recovery
2855 * process.
2856 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002857 bp->recovery_state = BNX2X_RECOVERY_DONE;
2858 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002859 bnx2x_release_leader_lock(bp);
2860 smp_mb();
2861
Merav Sicron51c1a582012-03-18 10:33:38 +00002862 DP(NETIF_MSG_IFDOWN, "Releasing leadership...\n");
2863 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864 return -EINVAL;
2865 }
2866
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002867 /*
2868 * It's important to set the bp->state to the value different from
2869 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2870 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2871 */
2872 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2873 smp_mb();
2874
Merav Sicron55c11942012-11-07 00:45:48 +00002875 if (CNIC_LOADED(bp))
2876 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2877
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002878 /* Stop Tx */
2879 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002880 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002881
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002882 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002883
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002884 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002885
Ariel Eliorad5afc82013-01-01 05:22:26 +00002886 if (IS_PF(bp)) {
2887 /* Set ALWAYS_ALIVE bit in shmem */
2888 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2889 bnx2x_drv_pulse(bp);
2890 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2891 bnx2x_save_statistics(bp);
2892 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002893
Ariel Eliorad5afc82013-01-01 05:22:26 +00002894 /* wait till consumers catch up with producers in all queues */
2895 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002896
Ariel Elior9b176b62013-01-01 05:22:28 +00002897 /* if VF, indicate to the PF that this function is going down (the PF
2898 * will delete sp elements and clear initializations)
2899 */
2900 if (IS_VF(bp))
2901 bnx2x_vfpf_close_vf(bp);
2902 else if (unload_mode != UNLOAD_RECOVERY)
2903 /* if this is a normal/close unload we need to clean up the chip */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002904 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002905 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002906 /* Send the UNLOAD_REQUEST to the MCP */
2907 bnx2x_send_unload_req(bp, unload_mode);
2908
2909 /*
2910 * Prevent transactions to host from the functions on the
2911 * engine that doesn't reset global blocks in case of global
2912 * attention once global blocks are reset and gates are opened
2913 * (the engine whose leader will perform the recovery
2914 * last).
2915 */
2916 if (!CHIP_IS_E1x(bp))
2917 bnx2x_pf_disable(bp);
2918
2919 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002920 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002921 /* Delete all NAPI objects */
2922 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002923 if (CNIC_LOADED(bp))
2924 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002925 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002926 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002927
2928 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002929 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002930 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002931
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002932 /*
2933 * At this stage no more interrupts will arrive so we may safely clean
2934 * the queueable objects here in case they failed to get cleaned so far.
2935 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002936 if (IS_PF(bp))
2937 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002938
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002939 /* There should be no more pending SP commands at this stage */
2940 bp->sp_state = 0;
2941
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002942 bp->port.pmf = 0;
2943
2944 /* Free SKBs, SGEs, TPA pool and driver internals */
2945 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002946 if (CNIC_LOADED(bp))
2947 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002948 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002949 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002950
Ariel Eliorad5afc82013-01-01 05:22:26 +00002951 bnx2x_free_fp_mem(bp);
2952 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00002953 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002954
Ariel Eliorad5afc82013-01-01 05:22:26 +00002955 if (IS_PF(bp)) {
2956 bnx2x_free_mem(bp);
2957 if (CNIC_LOADED(bp))
2958 bnx2x_free_mem_cnic(bp);
2959 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002960 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00002961 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002962
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002963 /* Check if there are pending parity attentions. If there are - set
2964 * RECOVERY_IN_PROGRESS.
2965 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002966 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002967 bnx2x_set_reset_in_progress(bp);
2968
2969 /* Set RESET_IS_GLOBAL if needed */
2970 if (global)
2971 bnx2x_set_reset_global(bp);
2972 }
2973
2974
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002975 /* The last driver must disable a "close the gate" if there is no
2976 * parity attention or "process kill" pending.
2977 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002978 if (IS_PF(bp) &&
2979 !bnx2x_clear_pf_load(bp) &&
2980 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002981 bnx2x_disable_close_the_gate(bp);
2982
Merav Sicron55c11942012-11-07 00:45:48 +00002983 DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
2984
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002985 return 0;
2986}
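
/* Editor's note: bnx2x_nic_unload() (and bnx2x_nic_load() above) are
 * driven from the net_device open/stop callbacks, which the networking
 * core already serializes under rtnl_lock. A simplified, illustrative
 * caller (names assumed):
 */
#if 0	/* illustration only */
static int example_ndo_stop(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* rtnl_lock is held by the core when ndo_stop runs */
	return bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
}
#endif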
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002987
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002988int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2989{
2990 u16 pmcsr;
2991
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002992 /* If there is no power capability, silently succeed */
2993 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002994 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002995 return 0;
2996 }
2997
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002998 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2999
3000 switch (state) {
3001 case PCI_D0:
3002 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3003 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3004 PCI_PM_CTRL_PME_STATUS));
3005
3006 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3007 /* delay required during transition out of D3hot */
3008 msleep(20);
3009 break;
3010
3011 case PCI_D3hot:
3012 /* If there are other clients above don't
3013 shut down the power */
3014 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3015 return 0;
3016 /* Don't shut down the power for emulation and FPGA */
3017 if (CHIP_REV_IS_SLOW(bp))
3018 return 0;
3019
3020 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3021 pmcsr |= 3;
3022
3023 if (bp->wol)
3024 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3025
3026 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3027 pmcsr);
3028
3029 /* No more memory access after this point until
3030 * device is brought back to D0.
3031 */
3032 break;
3033
3034 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003035 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003036 return -EINVAL;
3037 }
3038 return 0;
3039}
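
/* Editor's illustration of a typical caller of the PM helper above
 * (assumed, simplified; the driver's real suspend path does more work):
 */
#if 0	/* illustration only */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	netif_device_detach(dev);
	return bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
}
#endif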
3040
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003041/*
3042 * net_device service functions
3043 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003044int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003045{
3046 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003047 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003048 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3049 napi);
3050 struct bnx2x *bp = fp->bp;
3051
3052 while (1) {
3053#ifdef BNX2X_STOP_ON_ERROR
3054 if (unlikely(bp->panic)) {
3055 napi_complete(napi);
3056 return 0;
3057 }
3058#endif
3059
Ariel Elior6383c0b2011-07-14 08:31:57 +00003060 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003061 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3062 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003063
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003064 if (bnx2x_has_rx_work(fp)) {
3065 work_done += bnx2x_rx_int(fp, budget - work_done);
3066
3067 /* must not complete if we consumed full budget */
3068 if (work_done >= budget)
3069 break;
3070 }
3071
3072 /* Fall out from the NAPI loop if needed */
3073 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003074
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003075 /* No need to update SB for FCoE L2 ring as long as
3076 * it's connected to the default SB and the SB
3077 * has been updated when NAPI was scheduled.
3078 */
3079 if (IS_FCOE_FP(fp)) {
3080 napi_complete(napi);
3081 break;
3082 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003083 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003084 /* bnx2x_has_rx_work() reads the status block,
3085 * thus we need to ensure that status block indices
3086 * have been actually read (bnx2x_update_fpsb_idx)
3087 * prior to this check (bnx2x_has_rx_work) so that
3088 * we won't write the "newer" value of the status block
3089 * to IGU (if there was a DMA right after
3090 * bnx2x_has_rx_work and if there is no rmb, the memory
3091 * reading (bnx2x_update_fpsb_idx) may be postponed
3092 * to right before bnx2x_ack_sb). In this case there
3093 * will never be another interrupt until there is
3094 * another update of the status block, while there
3095 * is still unhandled work.
3096 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003097 rmb();
3098
3099 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3100 napi_complete(napi);
3101 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003102 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003103 "Update index to %d\n", fp->fp_hc_idx);
3104 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3105 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003106 IGU_INT_ENABLE, 1);
3107 break;
3108 }
3109 }
3110 }
3111
3112 return work_done;
3113}
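
/* Editor's sketch of the NAPI contract bnx2x_poll() implements (generic,
 * illustrative): a poll routine returns the number of Rx packets it
 * consumed and may only complete NAPI (re-enabling interrupts) once it
 * stayed under budget and no work is left:
 */
#if 0	/* sketch only; example_rx() is a hypothetical stand-in */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_rx(napi, budget);

	if (work_done < budget)
		napi_complete(napi);	/* under budget: ok to re-arm IRQs */
	return work_done;
}
#endif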
3114
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003115/* we split the first BD into headers and data BDs
3116 * to ease the pain of our fellow microcode engineers;
3117 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003118 */
3119static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003120 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003121 struct sw_tx_bd *tx_buf,
3122 struct eth_tx_start_bd **tx_bd, u16 hlen,
3123 u16 bd_prod, int nbd)
3124{
3125 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3126 struct eth_tx_bd *d_tx_bd;
3127 dma_addr_t mapping;
3128 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3129
3130 /* first fix first BD */
3131 h_tx_bd->nbd = cpu_to_le16(nbd);
3132 h_tx_bd->nbytes = cpu_to_le16(hlen);
3133
Merav Sicron51c1a582012-03-18 10:33:38 +00003134 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3135 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003136
3137 /* now get a new data BD
3138 * (after the pbd) and fill it */
3139 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003140 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003141
3142 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3143 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3144
3145 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3146 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3147 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3148
3149 /* this marks the BD as one that has no individual mapping */
3150 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3151
3152 DP(NETIF_MSG_TX_QUEUED,
3153 "TSO split data size is %d (%x:%x)\n",
3154 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3155
3156 /* update tx_bd */
3157 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3158
3159 return bd_prod;
3160}
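
/* Editor's illustration of the split arithmetic above (not driver code):
 * both BDs reuse the skb's single DMA mapping; the header BD keeps the
 * first hlen bytes and the data BD picks up the remainder at an offset.
 */
static void example_split_math(dma_addr_t mapping, u16 total_len, u16 hlen,
			       dma_addr_t *data_addr, u16 *data_len)
{
	*data_addr = mapping + hlen;	/* payload follows the headers */
	*data_len = total_len - hlen;	/* old_len - hlen, as in the code */
}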
3161
3162static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3163{
3164 if (fix > 0)
3165 csum = (u16) ~csum_fold(csum_sub(csum,
3166 csum_partial(t_header - fix, fix, 0)));
3167
3168 else if (fix < 0)
3169 csum = (u16) ~csum_fold(csum_add(csum,
3170 csum_partial(t_header, -fix, 0)));
3171
3172 return swab16(csum);
3173}
3174
3175static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3176{
3177 u32 rc;
3178
3179 if (skb->ip_summed != CHECKSUM_PARTIAL)
3180 rc = XMIT_PLAIN;
3181
3182 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00003183 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003184 rc = XMIT_CSUM_V6;
3185 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3186 rc |= XMIT_CSUM_TCP;
3187
3188 } else {
3189 rc = XMIT_CSUM_V4;
3190 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3191 rc |= XMIT_CSUM_TCP;
3192 }
3193 }
3194
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00003195 if (skb_is_gso_v6(skb))
3196 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3197 else if (skb_is_gso(skb))
3198 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003199
3200 return rc;
3201}
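
/* Editor's illustration: xmit_type is a bitmask of XMIT_* flags from
 * bnx2x_cmn.h. An IPv4 TSO skb, for instance, yields
 * XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP, and later stages test
 * individual bits (setup_lso()/setup_v6_csum() are hypothetical):
 */
#if 0	/* sketch only */
	u32 xmit_type = bnx2x_xmit_type(bp, skb);

	if (xmit_type & XMIT_GSO)	/* any GSO flavour */
		setup_lso(skb);
	if (xmit_type & XMIT_CSUM_V6)	/* IPv6 checksum offload */
		setup_v6_csum(skb);
#endif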
3202
3203#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3204/* check if packet requires linearization (packet is too fragmented)
3205 no need to check fragmentation if page size > 8K (there will be no
3206 violation of FW restrictions) */
3207static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3208 u32 xmit_type)
3209{
3210 int to_copy = 0;
3211 int hlen = 0;
3212 int first_bd_sz = 0;
3213
3214 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3215 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3216
3217 if (xmit_type & XMIT_GSO) {
3218 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3219 /* Check if LSO packet needs to be copied:
3220 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3221 int wnd_size = MAX_FETCH_BD - 3;
3222 /* Number of windows to check */
3223 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3224 int wnd_idx = 0;
3225 int frag_idx = 0;
3226 u32 wnd_sum = 0;
3227
3228 /* Headers length */
3229 hlen = (int)(skb_transport_header(skb) - skb->data) +
3230 tcp_hdrlen(skb);
3231
3232 /* Amount of data (w/o headers) on linear part of SKB*/
3233 first_bd_sz = skb_headlen(skb) - hlen;
3234
3235 wnd_sum = first_bd_sz;
3236
3237 /* Calculate the first sum - it's special */
3238 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3239 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003240 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003241
3242 /* If there was data on linear skb data - check it */
3243 if (first_bd_sz > 0) {
3244 if (unlikely(wnd_sum < lso_mss)) {
3245 to_copy = 1;
3246 goto exit_lbl;
3247 }
3248
3249 wnd_sum -= first_bd_sz;
3250 }
3251
3252 /* Others are easier: run through the frag list and
3253 check all windows */
3254 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3255 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003256 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003257
3258 if (unlikely(wnd_sum < lso_mss)) {
3259 to_copy = 1;
3260 break;
3261 }
3262 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003263 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003264 }
3265 } else {
3266 /* in the non-LSO case a too fragmented packet should
3267 always be linearized */
3268 to_copy = 1;
3269 }
3270 }
3271
3272exit_lbl:
3273 if (unlikely(to_copy))
3274 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003275 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003276 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3277 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3278
3279 return to_copy;
3280}
3281#endif
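
/* Editor's distilled version of the sliding-window test above
 * (illustrative, standalone): every run of wnd_size consecutive BDs must
 * carry at least one MSS worth of payload, or the skb must be linearized.
 * frag_size[]/nr_frags are hypothetical stand-ins for the skb_shinfo()
 * fields the real check walks.
 */
static bool example_needs_linearize(const unsigned int *frag_size,
				    int nr_frags, int wnd_size,
				    unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nr_frags; i++) {
		wnd_sum += frag_size[i];
		if (i >= wnd_size)	/* slide: drop the oldest fragment */
			wnd_sum -= frag_size[i - wnd_size];
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return true;	/* window too small for one MSS */
	}
	return false;
}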
3282
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003283static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3284 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003285{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003286 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3287 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3288 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003289 if ((xmit_type & XMIT_GSO_V6) &&
3290 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003291 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003292}
3293
3294/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003295 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003296 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003297 * @skb: packet skb
3298 * @pbd: parse BD
3299 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003300 */
3301static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3302 struct eth_tx_parse_bd_e1x *pbd,
3303 u32 xmit_type)
3304{
3305 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3306 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
3307 pbd->tcp_flags = pbd_tcp_flags(skb);
3308
3309 if (xmit_type & XMIT_GSO_V4) {
3310 pbd->ip_id = swab16(ip_hdr(skb)->id);
3311 pbd->tcp_pseudo_csum =
3312 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3313 ip_hdr(skb)->daddr,
3314 0, IPPROTO_TCP, 0));
3315
3316 } else
3317 pbd->tcp_pseudo_csum =
3318 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3319 &ipv6_hdr(skb)->daddr,
3320 0, IPPROTO_TCP, 0));
3321
3322 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3323}
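
/* Editor's note (assumed rationale): the pseudo-header checksum above is
 * seeded with a zero length because the chip patches in the real TCP
 * length for each segment it generates; swab16(~csum_...) converts the
 * folded sum to the raw 16-bit form the parse BD expects. An equivalent
 * helper for the IPv4 case:
 */
static u16 example_tso_pseudo_csum_v4(const struct iphdr *iph)
{
	/* len = 0: firmware fills the per-segment length itself */
	return swab16(~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 0, IPPROTO_TCP, 0));
}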
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003324
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003325/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003326 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003327 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003328 * @bp: driver handle
3329 * @skb: packet skb
3330 * @parsing_data: data to be updated
3331 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003332 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003333 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003334 */
3335static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Yuval Mintz2de67432013-01-23 03:21:43 +00003336 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003337{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003338 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003339 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3340 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3341 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003342
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003343 if (xmit_type & XMIT_CSUM_TCP) {
3344 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3345 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3346 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003347
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003348 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3349 } else
3350 /* We support checksum offload for TCP and UDP only.
3351 * No need to pass the UDP header length - it's a constant.
3352 */
3353 return skb_transport_header(skb) +
3354 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003355}
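
/* Editor's illustration: E2 parse-BD offsets are carried in 16-bit words,
 * hence the ">> 1" applied to byte offsets above. E.g. with 14B Ethernet
 * + 20B IPv4 headers the TCP header starts at byte 34 = word 17:
 */
static u32 example_tcp_hdr_start_w(const struct sk_buff *skb)
{
	/* same byte-offset-to-words math as bnx2x_set_pbd_csum_e2() */
	return ((u8 *)skb_transport_header(skb) - skb->data) >> 1;
}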
3356
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003357static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3358 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3359{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003360 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3361
3362 if (xmit_type & XMIT_CSUM_V4)
3363 tx_start_bd->bd_flags.as_bitfield |=
3364 ETH_TX_BD_FLAGS_IP_CSUM;
3365 else
3366 tx_start_bd->bd_flags.as_bitfield |=
3367 ETH_TX_BD_FLAGS_IPV6;
3368
3369 if (!(xmit_type & XMIT_CSUM_TCP))
3370 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003371}
3372
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003373/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003374 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003375 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003376 * @bp: driver handle
3377 * @skb: packet skb
3378 * @pbd: parse BD to be updated
3379 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003380 */
3381static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3382 struct eth_tx_parse_bd_e1x *pbd,
3383 u32 xmit_type)
3384{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003385 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003386
3387 /* for now NS flag is not used in Linux */
3388 pbd->global_data =
3389 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3390 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3391
3392 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003393 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003394
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003395 hlen += pbd->ip_hlen_w;
3396
3397 /* We support checksum offload for TCP and UDP only */
3398 if (xmit_type & XMIT_CSUM_TCP)
3399 hlen += tcp_hdrlen(skb) / 2;
3400 else
3401 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003402
3403 pbd->total_hlen_w = cpu_to_le16(hlen);
3404 hlen = hlen*2;
3405
3406 if (xmit_type & XMIT_CSUM_TCP) {
3407 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3408
3409 } else {
3410 s8 fix = SKB_CS_OFF(skb); /* signed! */
3411
3412 DP(NETIF_MSG_TX_QUEUED,
3413 "hlen %d fix %d csum before fix %x\n",
3414 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3415
3416 /* HW bug: fixup the CSUM */
3417 pbd->tcp_pseudo_csum =
3418 bnx2x_csum_fix(skb_transport_header(skb),
3419 SKB_CS(skb), fix);
3420
3421 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3422 pbd->tcp_pseudo_csum);
3423 }
3424
3425 return hlen;
3426}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003427
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003428/* called with netif_tx_lock
3429 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3430 * netif_wake_queue()
3431 */
3432netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3433{
3434 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003435
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003436 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003437 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003438 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003439 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003440 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003441 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003442 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003443 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003444 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003445 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003446 dma_addr_t mapping;
3447 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3448 int i;
3449 u8 hlen = 0;
3450 __le16 pkt_size = 0;
3451 struct ethhdr *eth;
3452 u8 mac_type = UNICAST_ADDRESS;
3453
3454#ifdef BNX2X_STOP_ON_ERROR
3455 if (unlikely(bp->panic))
3456 return NETDEV_TX_BUSY;
3457#endif
3458
Ariel Elior6383c0b2011-07-14 08:31:57 +00003459 txq_index = skb_get_queue_mapping(skb);
3460 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003461
Merav Sicron55c11942012-11-07 00:45:48 +00003462 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003463
Merav Sicron65565882012-06-19 07:48:26 +00003464 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003465
3466 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003467 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003468 txq_index, fp_index, txdata_index); */
3469
Ariel Elior6383c0b2011-07-14 08:31:57 +00003470 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003471 DP(NETIF_MSG_TX_QUEUED,
3472 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003473 txdata->cid, fp_index, txdata_index, txdata, fp); */
3474
3475 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003476 skb_shinfo(skb)->nr_frags +
3477 BDS_PER_TX_PKT +
3478 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003479 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003480 if (txdata->tx_ring_size == 0) {
3481 struct bnx2x_eth_q_stats *q_stats =
3482 bnx2x_fp_qstats(bp, txdata->parent_fp);
3483 q_stats->driver_filtered_tx_pkt++;
3484 dev_kfree_skb(skb);
3485 return NETDEV_TX_OK;
3486 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003487 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3488 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003489 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003490
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003491 return NETDEV_TX_BUSY;
3492 }
3493
Merav Sicron51c1a582012-03-18 10:33:38 +00003494 DP(NETIF_MSG_TX_QUEUED,
3495 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003496 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003497 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3498
3499 eth = (struct ethhdr *)skb->data;
3500
3501 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3502 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3503 if (is_broadcast_ether_addr(eth->h_dest))
3504 mac_type = BROADCAST_ADDRESS;
3505 else
3506 mac_type = MULTICAST_ADDRESS;
3507 }
3508
3509#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3510 /* First, check if we need to linearize the skb (due to FW
3511 restrictions). No need to check fragmentation if page size > 8K
3512 (there will be no violation to FW restrictions) */
3513 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3514 /* Statistics of linearization */
3515 bp->lin_cnt++;
3516 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003517 DP(NETIF_MSG_TX_QUEUED,
3518 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003519 dev_kfree_skb_any(skb);
3520 return NETDEV_TX_OK;
3521 }
3522 }
3523#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003524 /* Map skb linear data for DMA */
3525 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3526 skb_headlen(skb), DMA_TO_DEVICE);
3527 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003528 DP(NETIF_MSG_TX_QUEUED,
3529 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003530 dev_kfree_skb_any(skb);
3531 return NETDEV_TX_OK;
3532 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003533 /*
3534 Please read carefully. First we use one BD which we mark as start,
3535 then we have a parsing info BD (used for TSO or xsum),
3536 and only then we have the rest of the TSO BDs.
3537 (don't forget to mark the last one as last,
3538 and to unmap only AFTER you write to the BD ...)
3539 And above all, all pbd sizes are in words - NOT DWORDS!
3540 */
3541
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003542 /* get current pkt produced now - advance it just before sending packet
3543 * since mapping of pages may fail and cause packet to be dropped
3544 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003545 pkt_prod = txdata->tx_pkt_prod;
3546 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003547
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003548 /* get a tx_buf and first BD
3549 * tx_start_bd may be changed during SPLIT,
3550 * but first_bd will always stay first
3551 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003552 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3553 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003554 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003555
3556 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003557 SET_FLAG(tx_start_bd->general_data,
3558 ETH_TX_START_BD_PARSE_NBDS,
3559 0);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003560
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003561 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003562 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003563
3564 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003565 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003566 tx_buf->skb = skb;
3567 tx_buf->flags = 0;
3568
3569 DP(NETIF_MSG_TX_QUEUED,
3570 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003571 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003572
Jesse Grosseab6d182010-10-20 13:56:03 +00003573 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003574 tx_start_bd->vlan_or_ethertype =
3575 cpu_to_le16(vlan_tx_tag_get(skb));
3576 tx_start_bd->bd_flags.as_bitfield |=
3577 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003578 } else {
3579 /* when transmitting in a vf, start bd must hold the ethertype
3580 * for fw to enforce it
3581 */
Yuval Mintz823e1d92013-01-14 05:11:47 +00003582#ifndef BNX2X_STOP_ON_ERROR
Ariel Eliordc1ba592013-01-01 05:22:30 +00003583 if (IS_VF(bp)) {
Yuval Mintz823e1d92013-01-14 05:11:47 +00003584#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003585 tx_start_bd->vlan_or_ethertype =
3586 cpu_to_le16(ntohs(eth->h_proto));
Yuval Mintz823e1d92013-01-14 05:11:47 +00003587#ifndef BNX2X_STOP_ON_ERROR
Ariel Eliordc1ba592013-01-01 05:22:30 +00003588 } else {
3589 /* used by FW for packet accounting */
3590 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3591 }
Yuval Mintz823e1d92013-01-14 05:11:47 +00003592#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003593 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003594
3595 /* turn on parsing and get a BD */
3596 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003597
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003598 if (xmit_type & XMIT_CSUM)
3599 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003600
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003601 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003602 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003603 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3604 /* Set PBD in checksum offload case */
3605 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003606 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3607 &pbd_e2_parsing_data,
3608 xmit_type);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003609
3610 if (IS_MF_SI(bp) || IS_VF(bp)) {
3611 /* fill in the MAC addresses in the PBD - for local
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003612 * switching
3613 */
3614 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3615 &pbd_e2->src_mac_addr_mid,
3616 &pbd_e2->src_mac_addr_lo,
3617 eth->h_source);
3618 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3619 &pbd_e2->dst_mac_addr_mid,
3620 &pbd_e2->dst_mac_addr_lo,
3621 eth->h_dest);
3622 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003623
3624 SET_FLAG(pbd_e2_parsing_data,
3625 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003626 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003627 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003628 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003629 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3630 /* Set PBD in checksum offload case */
3631 if (xmit_type & XMIT_CSUM)
3632 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003633
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003634 SET_FLAG(global_data,
3635 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3636 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003637 }
3638
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003639 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003640 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3641 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003642 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003643 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3644 pkt_size = tx_start_bd->nbytes;
3645
Merav Sicron51c1a582012-03-18 10:33:38 +00003646 DP(NETIF_MSG_TX_QUEUED,
3647 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003648 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3649 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003650 tx_start_bd->bd_flags.as_bitfield,
3651 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003652
3653 if (xmit_type & XMIT_GSO) {
3654
3655 DP(NETIF_MSG_TX_QUEUED,
3656 "TSO packet len %d hlen %d total len %d tso size %d\n",
3657 skb->len, hlen, skb_headlen(skb),
3658 skb_shinfo(skb)->gso_size);
3659
3660 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3661
3662 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003663 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3664 &tx_start_bd, hlen,
3665 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003666 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003667 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3668 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003669 else
3670 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003671 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003672
3673 /* Set the PBD's parsing_data field if not zero
3674 * (for the chips newer than 57711).
3675 */
3676 if (pbd_e2_parsing_data)
3677 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3678
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003679 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3680
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003681 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003682 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3683 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3684
Eric Dumazet9e903e02011-10-18 21:00:24 +00003685 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3686 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003687 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003688 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003689
Merav Sicron51c1a582012-03-18 10:33:38 +00003690 DP(NETIF_MSG_TX_QUEUED,
3691 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003692
3693 /* we need unmap all buffers already mapped
3694 * for this SKB;
3695 * first_bd->nbd need to be properly updated
3696 * before call to bnx2x_free_tx_pkt
3697 */
3698 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003699 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003700 TX_BD(txdata->tx_pkt_prod),
3701 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003702 return NETDEV_TX_OK;
3703 }
3704
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003705 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003706 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003707 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003708 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003709
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003710 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3711 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003712 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3713 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003714 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003715
3716 DP(NETIF_MSG_TX_QUEUED,
3717 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3718 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3719 le16_to_cpu(tx_data_bd->nbytes));
3720 }
3721
3722 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3723
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003724 /* update with actual num BDs */
3725 first_bd->nbd = cpu_to_le16(nbd);
3726
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003727 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3728
3729 /* now send a tx doorbell, counting the next BD
3730 * if the packet contains or ends with it
3731 */
3732 if (TX_BD_POFF(bd_prod) < nbd)
3733 nbd++;
3734
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003735 /* total_pkt_bytes should be set on the first data BD if
3736 * it's not an LSO packet and there is more than one
3737 * data BD. In this case pkt_size is limited by an MTU value.
3738 * However we prefer to set it for an LSO packet (while we don't
3739 * have to) in order to save some CPU cycles in a non-LSO
3740 * case, when we care much more about them.
3741 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003742 if (total_pkt_bd != NULL)
3743 total_pkt_bd->total_pkt_bytes = pkt_size;
3744
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003745 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003746 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003747 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003748 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3749 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3750 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3751 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003752 if (pbd_e2)
3753 DP(NETIF_MSG_TX_QUEUED,
3754 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3755 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3756 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3757 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3758 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003759 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3760
Tom Herbert2df1a702011-11-28 16:33:37 +00003761 netdev_tx_sent_queue(txq, skb->len);
3762
Willem de Bruijn8373c572012-04-27 09:04:06 +00003763 skb_tx_timestamp(skb);
3764
Ariel Elior6383c0b2011-07-14 08:31:57 +00003765 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003766 /*
3767 * Make sure that the BD data is updated before updating the producer
3768 * since FW might read the BD right after the producer is updated.
3769 * This is only applicable for weak-ordered memory model archs such
3770 * as IA-64. The following barrier is also mandatory since FW will
3771 * assume packets must have BDs.
3772 */
3773 wmb();
3774
Ariel Elior6383c0b2011-07-14 08:31:57 +00003775 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003776 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003777
Ariel Elior6383c0b2011-07-14 08:31:57 +00003778 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003779
3780 mmiowb();
3781
Ariel Elior6383c0b2011-07-14 08:31:57 +00003782 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003783
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003784 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003785 netif_tx_stop_queue(txq);
3786
3787 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3788 * ordering of set_bit() in netif_tx_stop_queue() and read of
3789 * fp->bd_tx_cons */
3790 smp_mb();
3791
Barak Witkowski15192a82012-06-19 07:48:28 +00003792 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003793 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003794 netif_tx_wake_queue(txq);
3795 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003796 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003797
3798 return NETDEV_TX_OK;
3799}
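
/* Editor's distilled view of the producer/doorbell protocol used at the
 * end of bnx2x_start_xmit() (illustrative): BD writes must be visible
 * before the producer update, and the producer update before the
 * doorbell that makes the chip fetch the new BDs.
 */
#if 0	/* sketch only */
	wmb();				/* BDs visible before prod update */
	txdata->tx_db.data.prod += nbd;	/* publish the new producer */
	barrier();			/* compiler: prod before doorbell */
	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
	mmiowb();			/* order the MMIO write vs. unlock */
#endif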
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003800
Ariel Elior6383c0b2011-07-14 08:31:57 +00003801/**
3802 * bnx2x_setup_tc - routine to configure net_device for multi tc
3803 *
3804 * @netdev: net device to configure
3805 * @tc: number of traffic classes to enable
3806 *
3807 * callback connected to the ndo_setup_tc function pointer
3808 */
3809int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3810{
3811 int cos, prio, count, offset;
3812 struct bnx2x *bp = netdev_priv(dev);
3813
3814 /* setup tc must be called under rtnl lock */
3815 ASSERT_RTNL();
3816
3817 /* no traffic classes requested. aborting */
3818 if (!num_tc) {
3819 netdev_reset_tc(dev);
3820 return 0;
3821 }
3822
3823 /* requested to support too many traffic classes */
3824 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003825 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3826 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003827 return -EINVAL;
3828 }
3829
3830 /* declare amount of supported traffic classes */
3831 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003832 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003833 return -EINVAL;
3834 }
3835
3836 /* configure priority to traffic class mapping */
3837 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3838 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003839 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3840 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003841 prio, bp->prio_to_cos[prio]);
3842 }
3843
3844
3845 /* Use this configuration to differentiate tc0 from other COSes
3846 This can be used for ets or pfc, and saves the effort of setting
3847 up a multi-class queueing discipline or negotiating DCBX with a switch
3848 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003849 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003850 for (prio = 1; prio < 16; prio++) {
3851 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003852 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003853 } */
3854
3855 /* configure traffic class to transmission queue mapping */
3856 for (cos = 0; cos < bp->max_cos; cos++) {
3857 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003858 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003859 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003860 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3861 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003862 cos, offset, count);
3863 }
3864
3865 return 0;
3866}
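
/* Editor's worked example (hypothetical numbers): with 8 ETH queues and
 * num_tc == 3 the loop above maps tc 0 -> queues 0..7, tc 1 -> 8..15,
 * tc 2 -> 16..23; i.e. a packet of class 'cos' lands on:
 */
static inline u16 example_txq_of(u8 cos, u16 q_in_tc, u16 num_eth_queues)
{
	/* mirrors: offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp) */
	return cos * num_eth_queues + q_in_tc;
}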
3867
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003868/* called with rtnl_lock */
3869int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3870{
3871 struct sockaddr *addr = p;
3872 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003873 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003874
Merav Sicron51c1a582012-03-18 10:33:38 +00003875 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3876 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003877 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003878 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003879
Barak Witkowskia3348722012-04-23 03:04:46 +00003880 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3881 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003882 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003883 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003884 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003885
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003886 if (netif_running(dev)) {
3887 rc = bnx2x_set_eth_mac(bp, false);
3888 if (rc)
3889 return rc;
3890 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003891
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003892 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3893
3894 if (netif_running(dev))
3895 rc = bnx2x_set_eth_mac(bp, true);
3896
3897 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003898}
3899
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003900static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3901{
3902 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3903 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003904 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003905
3906 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00003907
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003908 if (IS_FCOE_IDX(fp_index)) {
3909 memset(sb, 0, sizeof(union host_hc_status_block));
3910 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003911 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003912 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003913 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003914 BNX2X_PCI_FREE(sb->e2_sb,
3915 bnx2x_fp(bp, fp_index,
3916 status_blk_mapping),
3917 sizeof(struct host_hc_status_block_e2));
3918 else
3919 BNX2X_PCI_FREE(sb->e1x_sb,
3920 bnx2x_fp(bp, fp_index,
3921 status_blk_mapping),
3922 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003923 }
Merav Sicron55c11942012-11-07 00:45:48 +00003924
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003925 /* Rx */
3926 if (!skip_rx_queue(bp, fp_index)) {
3927 bnx2x_free_rx_bds(fp);
3928
3929 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3930 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3931 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3932 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3933 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3934
3935 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3936 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3937 sizeof(struct eth_fast_path_rx_cqe) *
3938 NUM_RCQ_BD);
3939
3940 /* SGE ring */
3941 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3942 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3943 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3944 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3945 }
3946
3947 /* Tx */
3948 if (!skip_tx_queue(bp, fp_index)) {
3949 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003950 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003951 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003952
Merav Sicron51c1a582012-03-18 10:33:38 +00003953 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003954 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003955 fp_index, cos, txdata->cid);
3956
3957 BNX2X_FREE(txdata->tx_buf_ring);
3958 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3959 txdata->tx_desc_mapping,
3960 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3961 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003962 }
3963 /* end of fastpath */
3964}
3965
Merav Sicron55c11942012-11-07 00:45:48 +00003966void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3967{
3968 int i;
3969 for_each_cnic_queue(bp, i)
3970 bnx2x_free_fp_mem_at(bp, i);
3971}
3972
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003973void bnx2x_free_fp_mem(struct bnx2x *bp)
3974{
3975 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00003976 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003977 bnx2x_free_fp_mem_at(bp, i);
3978}
3979
Eric Dumazet1191cb82012-04-27 21:39:21 +00003980static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003981{
3982 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003983 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003984 bnx2x_fp(bp, index, sb_index_values) =
3985 (__le16 *)status_blk.e2_sb->sb.index_values;
3986 bnx2x_fp(bp, index, sb_running_index) =
3987 (__le16 *)status_blk.e2_sb->sb.running_index;
3988 } else {
3989 bnx2x_fp(bp, index, sb_index_values) =
3990 (__le16 *)status_blk.e1x_sb->sb.index_values;
3991 bnx2x_fp(bp, index, sb_running_index) =
3992 (__le16 *)status_blk.e1x_sb->sb.running_index;
3993 }
3994}
3995
Eric Dumazet1191cb82012-04-27 21:39:21 +00003996/* Returns the number of actually allocated BDs */
3997static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3998 int rx_ring_size)
3999{
4000 struct bnx2x *bp = fp->bp;
4001 u16 ring_prod, cqe_ring_prod;
4002 int i, failure_cnt = 0;
4003
4004 fp->rx_comp_cons = 0;
4005 cqe_ring_prod = ring_prod = 0;
4006
4007	/* This routine is called only during fp init, so
4008 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4009 */
4010 for (i = 0; i < rx_ring_size; i++) {
4011 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4012 failure_cnt++;
4013 continue;
4014 }
4015 ring_prod = NEXT_RX_IDX(ring_prod);
4016 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4017 WARN_ON(ring_prod <= (i - failure_cnt));
4018 }
4019
4020 if (failure_cnt)
4021 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4022 i - failure_cnt, fp->index);
4023
4024 fp->rx_bd_prod = ring_prod;
4025 /* Limit the CQE producer by the CQE ring size */
4026 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4027 cqe_ring_prod);
4028 fp->rx_pkt = fp->rx_calls = 0;
4029
Barak Witkowski15192a82012-06-19 07:48:28 +00004030 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004031
4032 return i - failure_cnt;
4033}
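/* Worked example (hypothetical counts): a request for rx_ring_size == 4096
 * that hits 3 allocation failures returns 4093, advances the BD/CQE
 * producers 4093 times and bumps rx_skb_alloc_failed by 3. */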
4034
4035static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4036{
4037 int i;
4038
4039 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4040 struct eth_rx_cqe_next_page *nextpg;
4041
4042 nextpg = (struct eth_rx_cqe_next_page *)
4043 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4044 nextpg->addr_hi =
4045 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4046 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4047 nextpg->addr_lo =
4048 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4049 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4050 }
4051}
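/* Chain sketch (assuming NUM_RCQ_RINGS == 8 for this build): the last CQE
 * of page 0 points at page 1, page 1 at page 2, and so on; at i == 8,
 * 8 % 8 == 0 wraps the last page back to page 0, closing the CQ ring of
 * BCM_PAGE_SIZE pages. */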
4052
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004053static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4054{
4055 union host_hc_status_block *sb;
4056 struct bnx2x_fastpath *fp = &bp->fp[index];
4057 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004058 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004059 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004060
Barak Witkowskia3348722012-04-23 03:04:46 +00004061 if (!bp->rx_ring_size &&
4062 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004063 rx_ring_size = MIN_RX_SIZE_NONTPA;
4064 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004065 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004066 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4067
Yuval Mintz065f8b92012-10-03 04:22:59 +00004068 if (CHIP_IS_E3(bp)) {
4069 u32 cfg = SHMEM_RD(bp,
4070 dev_info.port_hw_config[BP_PORT(bp)].
4071 default_cfg);
4072
4073 /* Decrease ring size for 1G functions */
4074 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4075 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4076 rx_ring_size /= 10;
4077 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004078
David S. Miller8decf862011-09-22 03:23:13 -04004079		/* allocate at least the number of buffers required by the FW */
4080 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4081 MIN_RX_SIZE_TPA, rx_ring_size);
4082
4083 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004084 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004085 rx_ring_size = bp->rx_ring_size;
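	/* Sizing sketch (hypothetical figures): if MAX_RX_AVAIL /
	 * BNX2X_NUM_RX_QUEUES(bp) came to 1020 on a 1G SGMII E3 function,
	 * the divide-by-10 cut gives 102, which max_t() then raises back to
	 * the FW minimum (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA). */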
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004086
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004087 /* Common */
4088 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004089
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004090 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004091 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004092 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004093 BNX2X_PCI_ALLOC(sb->e2_sb,
4094 &bnx2x_fp(bp, index, status_blk_mapping),
4095 sizeof(struct host_hc_status_block_e2));
4096 else
4097 BNX2X_PCI_ALLOC(sb->e1x_sb,
4098 &bnx2x_fp(bp, index, status_blk_mapping),
4099 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004100 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004101
4102 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4103 * set shortcuts for it.
4104 */
4105 if (!IS_FCOE_IDX(index))
4106 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004107
4108 /* Tx */
4109 if (!skip_tx_queue(bp, index)) {
4110 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004111 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004112 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004113
Merav Sicron51c1a582012-03-18 10:33:38 +00004114 DP(NETIF_MSG_IFUP,
4115 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004116 index, cos);
4117
4118 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004119 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004120 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4121 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004122 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004123 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004124 }
4125
4126 /* Rx */
4127 if (!skip_rx_queue(bp, index)) {
4128 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4129 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4130 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4131 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4132 &bnx2x_fp(bp, index, rx_desc_mapping),
4133 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4134
4135 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4136 &bnx2x_fp(bp, index, rx_comp_mapping),
4137 sizeof(struct eth_fast_path_rx_cqe) *
4138 NUM_RCQ_BD);
4139
4140 /* SGE ring */
4141 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4142 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4143 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4144 &bnx2x_fp(bp, index, rx_sge_mapping),
4145 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4146 /* RX BD ring */
4147 bnx2x_set_next_page_rx_bd(fp);
4148
4149 /* CQ ring */
4150 bnx2x_set_next_page_rx_cq(fp);
4151
4152 /* BDs */
4153 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4154 if (ring_size < rx_ring_size)
4155 goto alloc_mem_err;
4156 }
4157
4158 return 0;
4159
4160/* handles low memory cases */
4161alloc_mem_err:
4162 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4163 index, ring_size);
4164	/* FW will drop all packets if the queue is not big enough;
4165	 * in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004166	 * The minimum size differs for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004167 */
4168 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004169 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004170 /* release memory allocated for this queue */
4171 bnx2x_free_fp_mem_at(bp, index);
4172 return -ENOMEM;
4173 }
4174 return 0;
4175}
4176
Merav Sicron55c11942012-11-07 00:45:48 +00004177int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004178{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004179 if (!NO_FCOE(bp))
4180 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004181 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004182		/* we will fail the load process instead of marking
4183 * NO_FCOE_FLAG
4184 */
4185 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004186
4187 return 0;
4188}
4189
4190int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4191{
4192 int i;
4193
4194 /* 1. Allocate FP for leading - fatal if error
4195 * 2. Allocate RSS - fix number of queues if error
4196 */
4197
4198 /* leading */
4199 if (bnx2x_alloc_fp_mem_at(bp, 0))
4200 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004201
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004202 /* RSS */
4203 for_each_nondefault_eth_queue(bp, i)
4204 if (bnx2x_alloc_fp_mem_at(bp, i))
4205 break;
4206
4207 /* handle memory failures */
4208 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4209 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4210
4211 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004212 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004213 if (CNIC_SUPPORT(bp))
4214		/* move non-eth FPs next to the last eth FP;
4215		 * this must be done in that order, since
4216		 * FCOE_IDX < FWD_IDX < OOO_IDX
4217 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004218
Merav Sicron55c11942012-11-07 00:45:48 +00004219		/* move FCoE fp even if NO_FCOE_FLAG is on */
4220 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4221 bp->num_ethernet_queues -= delta;
4222 bp->num_queues = bp->num_ethernet_queues +
4223 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004224 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4225 bp->num_queues + delta, bp->num_queues);
4226 }
4227
4228 return 0;
4229}
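/* Failure-handling sketch (hypothetical queue counts): with 8 RSS queues
 * plus CNIC support, an allocation failure at i == 5 gives delta == 3, so
 * the FCoE fp moves from index 8 to index 5 and num_queues shrinks by 3. */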
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004230
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004231void bnx2x_free_mem_bp(struct bnx2x *bp)
4232{
Barak Witkowski15192a82012-06-19 07:48:28 +00004233 kfree(bp->fp->tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004234 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004235 kfree(bp->sp_objs);
4236 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004237 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004238 kfree(bp->msix_table);
4239 kfree(bp->ilt);
4240}
4241
Bill Pemberton0329aba2012-12-03 09:24:24 -05004242int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004243{
4244 struct bnx2x_fastpath *fp;
4245 struct msix_entry *tbl;
4246 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004247 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004248 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004249 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004250
Ariel Elior6383c0b2011-07-14 08:31:57 +00004251 /*
4252	 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004253	 * path IGU SBs plus the default SB (PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004254 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004255 msix_table_size = bp->igu_sb_cnt;
4256 if (IS_PF(bp))
4257 msix_table_size++;
4258 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
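	/* e.g. (hypothetical counts) a PF with bp->igu_sb_cnt == 16 sizes
	 * the table at 17 entries; a VF with the same SB count stays at 16.
	 */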
Ariel Elior6383c0b2011-07-14 08:31:57 +00004259
4260 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004261 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004262 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
4263
4264 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004265 if (!fp)
4266 goto alloc_err;
Barak Witkowski15192a82012-06-19 07:48:28 +00004267 for (i = 0; i < fp_array_size; i++) {
4268 fp[i].tpa_info =
4269 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4270 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4271 if (!(fp[i].tpa_info))
4272 goto alloc_err;
4273 }
4274
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004275 bp->fp = fp;
4276
Barak Witkowski15192a82012-06-19 07:48:28 +00004277 /* allocate sp objs */
4278 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
4279 GFP_KERNEL);
4280 if (!bp->sp_objs)
4281 goto alloc_err;
4282
4283 /* allocate fp_stats */
4284 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
4285 GFP_KERNEL);
4286 if (!bp->fp_stats)
4287 goto alloc_err;
4288
Merav Sicron65565882012-06-19 07:48:26 +00004289 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004290 txq_array_size =
4291 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4292 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4293
4294 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4295 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004296 if (!bp->bnx2x_txq)
4297 goto alloc_err;
4298
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004299 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004300 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004301 if (!tbl)
4302 goto alloc_err;
4303 bp->msix_table = tbl;
4304
4305 /* ilt */
4306 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4307 if (!ilt)
4308 goto alloc_err;
4309 bp->ilt = ilt;
4310
4311 return 0;
4312alloc_err:
4313 bnx2x_free_mem_bp(bp);
4314 return -ENOMEM;
4315}
4316
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004318int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004319{
4320 struct bnx2x *bp = netdev_priv(dev);
4321
4322 if (unlikely(!netif_running(dev)))
4323 return 0;
4324
Yuval Mintz5d07d862012-09-13 02:56:21 +00004325 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004326 return bnx2x_nic_load(bp, LOAD_NORMAL);
4327}
4328
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004329int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4330{
4331 u32 sel_phy_idx = 0;
4332 if (bp->link_params.num_phys <= 1)
4333 return INT_PHY;
4334
4335 if (bp->link_vars.link_up) {
4336 sel_phy_idx = EXT_PHY1;
4337 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4338 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4339 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4340 sel_phy_idx = EXT_PHY2;
4341 } else {
4343 switch (bnx2x_phy_selection(&bp->link_params)) {
4344 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4345 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4346 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4347 sel_phy_idx = EXT_PHY1;
4348 break;
4349 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4350 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4351 sel_phy_idx = EXT_PHY2;
4352 break;
4353 }
4354 }
4355
4356 return sel_phy_idx;
4357}
4358
4359int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4360{
4361 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4362 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004363	 * The selected active PHY is always counted after swapping (when PHY
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004364	 * swapping is enabled), so in that case we need to reverse
4365	 * the configuration index
4366 */
4367
4368 if (bp->link_params.multi_phy_config &
4369 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4370 if (sel_phy_idx == EXT_PHY1)
4371 sel_phy_idx = EXT_PHY2;
4372 else if (sel_phy_idx == EXT_PHY2)
4373 sel_phy_idx = EXT_PHY1;
4374 }
4375 return LINK_CONFIG_IDX(sel_phy_idx);
4376}
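/* e.g. with PORT_HW_CFG_PHY_SWAPPED_ENABLED set while EXT_PHY1 is the
 * active PHY, this returns LINK_CONFIG_IDX(EXT_PHY2) to match the
 * swapped wiring (a sketch of the reversal above). */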
4377
Merav Sicron55c11942012-11-07 00:45:48 +00004378#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004379int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4380{
4381 struct bnx2x *bp = netdev_priv(dev);
4382 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4383
4384 switch (type) {
4385 case NETDEV_FCOE_WWNN:
4386 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4387 cp->fcoe_wwn_node_name_lo);
4388 break;
4389 case NETDEV_FCOE_WWPN:
4390 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4391 cp->fcoe_wwn_port_name_lo);
4392 break;
4393 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004394 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004395 return -EINVAL;
4396 }
4397
4398 return 0;
4399}
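/* HILO_U64 is assumed here to splice two 32-bit shmem words into one
 * 64-bit WWN, roughly ((u64)hi << 32) | lo; hypothetical words
 * hi == 0x10000000 and lo == 0x0a0b0c0d would read back as
 * 0x100000000a0b0c0d. */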
4400#endif
4401
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004402/* called with rtnl_lock */
4403int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4404{
4405 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004406
4407 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004408		BNX2X_ERR("Can't change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004409 return -EAGAIN;
4410 }
4411
4412 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004413 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4414 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004415 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004416 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004417
4418 /* This does not race with packet allocation
4419 * because the actual alloc size is
4420 * only updated as part of load
4421 */
4422 dev->mtu = new_mtu;
4423
Michał Mirosław66371c42011-04-12 09:38:23 +00004424 return bnx2x_reload_if_running(dev);
4425}
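/* Bounds sketch (constants hedged): new_mtu is rejected above
 * ETH_MAX_JUMBO_PACKET_SIZE and when new_mtu + ETH_HLEN falls below
 * ETH_MIN_PACKET_SIZE, so e.g. an MTU of 9000 is accepted while 20 fails
 * with -EINVAL. */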
4426
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004427netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004428 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004429{
4430 struct bnx2x *bp = netdev_priv(dev);
4431
4432 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004433 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004434 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004435 features &= ~NETIF_F_GRO;
4436 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004437
4438 return features;
4439}
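/* Interaction sketch (hypothetical invocation): a request such as
 * `ethtool -K ethX rx off lro on` arrives here without NETIF_F_RXCSUM,
 * so both LRO and GRO are masked away before the core commits the
 * feature set. */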
4440
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004441int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004442{
4443 struct bnx2x *bp = netdev_priv(dev);
4444 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004445 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00004446
4447 if (features & NETIF_F_LRO)
4448 flags |= TPA_ENABLE_FLAG;
4449 else
4450 flags &= ~TPA_ENABLE_FLAG;
4451
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004452 if (features & NETIF_F_GRO)
4453 flags |= GRO_ENABLE_FLAG;
4454 else
4455 flags &= ~GRO_ENABLE_FLAG;
4456
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004457 if (features & NETIF_F_LOOPBACK) {
4458 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4459 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4460 bnx2x_reload = true;
4461 }
4462 } else {
4463 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4464 bp->link_params.loopback_mode = LOOPBACK_NONE;
4465 bnx2x_reload = true;
4466 }
4467 }
4468
Michał Mirosław66371c42011-04-12 09:38:23 +00004469 if (flags ^ bp->flags) {
4470 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004471 bnx2x_reload = true;
4472 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004473
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004474 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004475 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4476 return bnx2x_reload_if_running(dev);
4477 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004478 }
4479
Michał Mirosław66371c42011-04-12 09:38:23 +00004480 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004481}
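/* e.g. toggling NETIF_F_LOOPBACK (hypothetically via
 * `ethtool -K ethX loopback on`) flips loopback_mode to LOOPBACK_BMAC
 * and, once recovery is done, ends in bnx2x_reload_if_running(). */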
4482
4483void bnx2x_tx_timeout(struct net_device *dev)
4484{
4485 struct bnx2x *bp = netdev_priv(dev);
4486
4487#ifdef BNX2X_STOP_ON_ERROR
4488 if (!bp->panic)
4489 bnx2x_panic();
4490#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004491
4492 smp_mb__before_clear_bit();
4493 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4494 smp_mb__after_clear_bit();
4495
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004496	/* This allows the netif to be shut down gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00004497 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004498}
4499
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004500int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4501{
4502 struct net_device *dev = pci_get_drvdata(pdev);
4503 struct bnx2x *bp;
4504
4505 if (!dev) {
4506 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4507 return -ENODEV;
4508 }
4509 bp = netdev_priv(dev);
4510
4511 rtnl_lock();
4512
4513 pci_save_state(pdev);
4514
4515 if (!netif_running(dev)) {
4516 rtnl_unlock();
4517 return 0;
4518 }
4519
4520 netif_device_detach(dev);
4521
Yuval Mintz5d07d862012-09-13 02:56:21 +00004522 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004523
4524 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4525
4526 rtnl_unlock();
4527
4528 return 0;
4529}
4530
4531int bnx2x_resume(struct pci_dev *pdev)
4532{
4533 struct net_device *dev = pci_get_drvdata(pdev);
4534 struct bnx2x *bp;
4535 int rc;
4536
4537 if (!dev) {
4538 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4539 return -ENODEV;
4540 }
4541 bp = netdev_priv(dev);
4542
4543 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004544 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004545 return -EAGAIN;
4546 }
4547
4548 rtnl_lock();
4549
4550 pci_restore_state(pdev);
4551
4552 if (!netif_running(dev)) {
4553 rtnl_unlock();
4554 return 0;
4555 }
4556
4557 bnx2x_set_power_state(bp, PCI_D0);
4558 netif_device_attach(dev);
4559
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004560 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4561
4562 rtnl_unlock();
4563
4564 return rc;
4565}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004566
4568void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4569 u32 cid)
4570{
4571 /* ustorm cxt validation */
4572 cxt->ustorm_ag_context.cdu_usage =
4573 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4574 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4575 /* xcontext validation */
4576 cxt->xstorm_ag_context.cdu_reserved =
4577 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4578 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4579}
4580
Eric Dumazet1191cb82012-04-27 21:39:21 +00004581static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4582 u8 fw_sb_id, u8 sb_index,
4583 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004584{
4586 u32 addr = BAR_CSTRORM_INTMEM +
4587 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4588 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004589 DP(NETIF_MSG_IFUP,
4590 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4591 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004592}
4593
Eric Dumazet1191cb82012-04-27 21:39:21 +00004594static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4595 u16 fw_sb_id, u8 sb_index,
4596 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004597{
4598 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4599 u32 addr = BAR_CSTRORM_INTMEM +
4600 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4601 u16 flags = REG_RD16(bp, addr);
4602 /* clear and set */
4603 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4604 flags |= enable_flag;
4605 REG_WR16(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004606 DP(NETIF_MSG_IFUP,
4607 "port %x fw_sb_id %d sb_index %d disable %d\n",
4608 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004609}
4610
4611void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4612 u8 sb_index, u8 disable, u16 usec)
4613{
4614 int port = BP_PORT(bp);
4615 u8 ticks = usec / BNX2X_BTR;
4616
4617 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4618
4619 disable = disable ? 1 : (usec ? 0 : 1);
4620 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4621}
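/* Worked example (BNX2X_BTR assumed to be 4): usec == 192 programs
 * 48 ticks with the index left enabled; usec == 0 yields ticks == 0 and
 * forces disable == 1, switching coalescing off for that SB index. */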