/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
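
/* Illustrative walk-through of the index math above (hypothetical values,
 * not taken from any particular configuration): with 8 ETH queues,
 * max_cos = 3 and the FCoE fastpath moving from index 8 to index 6,
 * old_txdata_index = 8 * 3 + FCOE_TXQ_IDX_OFFSET and
 * new_txdata_index = (8 - 8 + 6) * 3 + FCOE_TXQ_IDX_OFFSET, i.e. the FCoE
 * txdata slides back by max_cos * delta slots together with its fastpath.
 */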

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
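
/* Sketch of the compaction above (illustrative values: max_cos = 3,
 * old_eth_num = 8, delta = 2): the txq array is laid out cos-major, so
 * for the 6 remaining queues the cos 1 entries move from slots 8..13
 * down to 6..11 and the cos 2 entries from slots 16..21 down to 12..17.
 * The copy walks forward, so no entry is overwritten before it has been
 * copied out.
 */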

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
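
/* BD chain walked by bnx2x_free_tx_pkt() above, as implied by its unmap
 * logic (sketch):
 *
 *   start BD -> parse BD -> [TSO split header BD] -> data BD ... data BD
 *
 * nbd counts the BDs of the packet, but only the start BD and the data
 * BDs carry DMA mappings, which is why the parse BD and the optional
 * split header BD are stepped over without an unmap.
 */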

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
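
/* How the mask above works (sketch, assuming 64-bit mask elements): each
 * bit in sge_mask represents one SGE ring slot; a bit is cleared when the
 * FW reports the corresponding page consumed. The producer only advances
 * over whole 64-bit words that have dropped to zero (every page in them
 * consumed), re-arming each word to all-ones as it passes, which is why
 * delta grows in BIT_VEC64_ELEM_SZ steps.
 */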

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
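
/* Worked example of the MSS estimate above (illustrative numbers): for an
 * IPv4 aggregation with TCP timestamps whose first packet has
 * len_on_bd = 1514, hdrs_len = 14 (ETH_HLEN) + 20 (struct iphdr) +
 * 20 (struct tcphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, giving a gso_size
 * of 1514 - 66 = 1448.
 */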

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
			     PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
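
/* Sketch of the GRO page split above (illustrative values): with
 * gro_size = 1448 and frag_len = 4096, the page is attached as three skb
 * frags of 1448, 1448 and 1200 bytes at offsets 0, 1448 and 2896. Every
 * frag after the first takes an extra page reference, so the page is
 * only released once all frags that share it have been freed.
 */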

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
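
/* Design note on the two helpers above: rx_frag_size is non-zero when the
 * rx buffer fits inside a page fragment, so buffers come from the per-CPU
 * netdev_alloc_frag() cache and are freed by dropping a page reference;
 * otherwise (e.g. oversized buffers for jumbo MTUs) the driver falls back
 * to plain kmalloc()/kfree().
 */
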
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
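
/* Outcomes of the validation above (sketch of the flag combinations):
 *
 *   L4_XSUM_NO_VALIDATION set           -> skb left as CHECKSUM_NONE
 *   validated, IP/L4 bad-csum flag set  -> hw_csum_err++, CHECKSUM_NONE
 *   validated, no error flags           -> CHECKSUM_UNNECESSARY
 */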

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
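
/* Worked example of the MF speed math above (illustrative numbers): on a
 * 10000 Mbps link with maxCfg = 40, SI mode scales the actual link speed,
 * 10000 * 40 / 100 = 4000 Mbps, while SD mode applies a fixed cap of
 * vn_max_rate = 40 * 100 = 4000 Mbps, so both formulas land on the same
 * 4 Gbps limit here.
 */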
1051
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001052/**
1053 * bnx2x_fill_report_data - fill link report data to report
1054 *
1055 * @bp: driver handle
1056 * @data: link state to update
1057 *
1058 * It uses a none-atomic bit operations because is called under the mutex.
1059 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001060static void bnx2x_fill_report_data(struct bnx2x *bp,
1061 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001062{
1063 u16 line_speed = bnx2x_get_mf_speed(bp);
1064
1065 memset(data, 0, sizeof(*data));
1066
1067 /* Fill the report data: efective line speed */
1068 data->line_speed = line_speed;
1069
1070 /* Link is down */
1071 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1072 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1073 &data->link_report_flags);
1074
1075 /* Full DUPLEX */
1076 if (bp->link_vars.duplex == DUPLEX_FULL)
1077 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1078
1079 /* Rx Flow Control is ON */
1080 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1081 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1082
1083 /* Tx Flow Control is ON */
1084 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1085 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1086}
1087
1088/**
1089 * bnx2x_link_report - report link status to OS.
1090 *
1091 * @bp: driver handle
1092 *
1093 * Calls the __bnx2x_link_report() under the same locking scheme
1094 * as a link/PHY state managing code to ensure a consistent link
1095 * reporting.
1096 */
1097
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001098void bnx2x_link_report(struct bnx2x *bp)
1099{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001100 bnx2x_acquire_phy_lock(bp);
1101 __bnx2x_link_report(bp);
1102 bnx2x_release_phy_lock(bp);
1103}
1104
1105/**
1106 * __bnx2x_link_report - report link status to OS.
1107 *
1108 * @bp: driver handle
1109 *
1110 * None atomic inmlementation.
1111 * Should be called under the phy_lock.
1112 */
1113void __bnx2x_link_report(struct bnx2x *bp)
1114{
1115 struct bnx2x_link_report_data cur_data;
1116
1117 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001118 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001119 bnx2x_read_mf_cfg(bp);
1120
1121 /* Read the current link report info */
1122 bnx2x_fill_report_data(bp, &cur_data);
1123
1124 /* Don't report link down or exactly the same link status twice */
1125 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1126 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1127 &bp->last_reported_link.link_report_flags) &&
1128 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1129 &cur_data.link_report_flags)))
1130 return;
1131
1132 bp->link_cnt++;
1133
1134 /* We are going to report a new link parameters now -
1135 * remember the current data for the next time.
1136 */
1137 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1138
1139 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1140 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001141 netif_carrier_off(bp->dev);
1142 netdev_err(bp->dev, "NIC Link is Down\n");
1143 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001144 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001145 const char *duplex;
1146 const char *flow;
1147
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001148 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001149
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001150 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1151 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001152 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001153 else
Joe Perches94f05b02011-08-14 12:16:20 +00001154 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001155
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001156 /* Handle the FC at the end so that only these flags would be
1157 * possibly set. This way we may easily check if there is no FC
1158 * enabled.
1159 */
1160 if (cur_data.link_report_flags) {
1161 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1162 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001163 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1164 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001165 flow = "ON - receive & transmit";
1166 else
1167 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001168 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001169 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001170 }
Joe Perches94f05b02011-08-14 12:16:20 +00001171 } else {
1172 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001173 }
Joe Perches94f05b02011-08-14 12:16:20 +00001174 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1175 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001176 }
1177}
1178
Eric Dumazet1191cb82012-04-27 21:39:21 +00001179static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1180{
1181 int i;
1182
1183 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1184 struct eth_rx_sge *sge;
1185
1186 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1187 sge->addr_hi =
1188 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1189 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1190
1191 sge->addr_lo =
1192 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1193 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1194 }
1195}
1196
1197static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1198 struct bnx2x_fastpath *fp, int last)
1199{
1200 int i;
1201
1202 for (i = 0; i < last; i++) {
1203 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1204 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1205 u8 *data = first_buf->data;
1206
1207 if (data == NULL) {
1208 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1209 continue;
1210 }
1211 if (tpa_info->tpa_state == BNX2X_TPA_START)
1212 dma_unmap_single(&bp->pdev->dev,
1213 dma_unmap_addr(first_buf, mapping),
1214 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001215 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001216 first_buf->data = NULL;
1217 }
1218}
1219
Merav Sicron55c11942012-11-07 00:45:48 +00001220void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1221{
1222 int j;
1223
1224 for_each_rx_queue_cnic(bp, j) {
1225 struct bnx2x_fastpath *fp = &bp->fp[j];
1226
1227 fp->rx_bd_cons = 0;
1228
1229 /* Activate BD ring */
1230 /* Warning!
1231 * this will generate an interrupt (to the TSTORM)
1232 * must only be done after chip is initialized
1233 */
1234 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1235 fp->rx_sge_prod);
1236 }
1237}
1238
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001239void bnx2x_init_rx_rings(struct bnx2x *bp)
1240{
1241 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001242 u16 ring_prod;
1243 int i, j;
1244
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001245 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001246 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001247 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001248
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001249 DP(NETIF_MSG_IFUP,
1250 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1251
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001252 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001253 /* Fill the per-aggregtion pool */
David S. Miller8decf862011-09-22 03:23:13 -04001254 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001255 struct bnx2x_agg_info *tpa_info =
1256 &fp->tpa_info[i];
1257 struct sw_rx_bd *first_buf =
1258 &tpa_info->first_buf;
1259
Eric Dumazetd46d1322012-12-10 12:16:06 +00001260 first_buf->data = bnx2x_frag_alloc(fp);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001261 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001262 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1263 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001264 bnx2x_free_tpa_pool(bp, fp, i);
1265 fp->disable_tpa = 1;
1266 break;
1267 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001268 dma_unmap_addr_set(first_buf, mapping, 0);
1269 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001270 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001271
1272 /* "next page" elements initialization */
1273 bnx2x_set_next_page_sgl(fp);
1274
1275 /* set SGEs bit mask */
1276 bnx2x_init_sge_ring_bit_mask(fp);
1277
1278 /* Allocate SGEs and initialize the ring elements */
1279 for (i = 0, ring_prod = 0;
1280 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1281
1282 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001283 BNX2X_ERR("was only able to allocate %d rx sges\n",
1284 i);
1285 BNX2X_ERR("disabling TPA for queue[%d]\n",
1286 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001287 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001288 bnx2x_free_rx_sge_range(bp, fp,
1289 ring_prod);
1290 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001291 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001292 fp->disable_tpa = 1;
1293 ring_prod = 0;
1294 break;
1295 }
1296 ring_prod = NEXT_SGE_IDX(ring_prod);
1297 }
1298
1299 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001300 }
1301 }
1302
Merav Sicron55c11942012-11-07 00:45:48 +00001303 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001304 struct bnx2x_fastpath *fp = &bp->fp[j];
1305
1306 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001307
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001308 /* Activate BD ring */
1309 /* Warning!
1310	 * This will generate an interrupt (to the TSTORM);
1311	 * it must only be done after the chip is initialized.
1312 */
1313 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1314 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001315
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001316 if (j != 0)
1317 continue;
1318
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001319 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001320 REG_WR(bp, BAR_USTRORM_INTMEM +
1321 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1322 U64_LO(fp->rx_comp_mapping));
1323 REG_WR(bp, BAR_USTRORM_INTMEM +
1324 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1325 U64_HI(fp->rx_comp_mapping));
1326 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001327 }
1328}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001329
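/**
 * bnx2x_free_tx_skbs_queue - free the pending Tx packets of a fastpath.
 *
 * @fp:		fastpath handle
 *
 * Walks each CoS Tx ring of the fastpath, frees every packet between
 * the consumer and the producer, and resets the BQL state of the
 * corresponding netdev Tx queue.
 */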
Merav Sicron55c11942012-11-07 00:45:48 +00001330static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1331{
1332 u8 cos;
1333 struct bnx2x *bp = fp->bp;
1334
1335 for_each_cos_in_tx_queue(fp, cos) {
1336 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1337 unsigned pkts_compl = 0, bytes_compl = 0;
1338
1339 u16 sw_prod = txdata->tx_pkt_prod;
1340 u16 sw_cons = txdata->tx_pkt_cons;
1341
1342 while (sw_cons != sw_prod) {
1343 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1344 &pkts_compl, &bytes_compl);
1345 sw_cons++;
1346 }
1347
1348 netdev_tx_reset_queue(
1349 netdev_get_tx_queue(bp->dev,
1350 txdata->txq_index));
1351 }
1352}
1353
1354static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1355{
1356 int i;
1357
1358 for_each_tx_queue_cnic(bp, i) {
1359 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1360 }
1361}
1362
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001363static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1364{
1365 int i;
1366
Merav Sicron55c11942012-11-07 00:45:48 +00001367 for_each_eth_queue(bp, i) {
1368 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001369 }
1370}
1371
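/* Unmap and free all Rx buffers of a fastpath. Safe to call even if
 * the Rx ring was never allocated.
 */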
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001372static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1373{
1374 struct bnx2x *bp = fp->bp;
1375 int i;
1376
1377 /* ring wasn't allocated */
1378 if (fp->rx_buf_ring == NULL)
1379 return;
1380
1381 for (i = 0; i < NUM_RX_BD; i++) {
1382 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001383 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001384
Eric Dumazete52fcb22011-11-14 06:05:34 +00001385 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001386 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001387 dma_unmap_single(&bp->pdev->dev,
1388 dma_unmap_addr(rx_buf, mapping),
1389 fp->rx_buf_size, DMA_FROM_DEVICE);
1390
Eric Dumazete52fcb22011-11-14 06:05:34 +00001391 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001392 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001393 }
1394}
1395
Merav Sicron55c11942012-11-07 00:45:48 +00001396static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1397{
1398 int j;
1399
1400 for_each_rx_queue_cnic(bp, j) {
1401 bnx2x_free_rx_bds(&bp->fp[j]);
1402 }
1403}
1404
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001405static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1406{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001407 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001408
Merav Sicron55c11942012-11-07 00:45:48 +00001409 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001410 struct bnx2x_fastpath *fp = &bp->fp[j];
1411
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001412 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001413
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001414 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001415 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001416 }
1417}
1418
Merav Sicron55c11942012-11-07 00:45:48 +00001419void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1420{
1421 bnx2x_free_tx_skbs_cnic(bp);
1422 bnx2x_free_rx_skbs_cnic(bp);
1423}
1424
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001425void bnx2x_free_skbs(struct bnx2x *bp)
1426{
1427 bnx2x_free_tx_skbs(bp);
1428 bnx2x_free_rx_skbs(bp);
1429}
1430
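/**
 * bnx2x_update_max_mf_config - update the MAX BW part of the MF configuration.
 *
 * @bp:		driver handle
 * @value:	new MAX BW value
 *
 * Writes the new value into the function's MF configuration and reports
 * it to the MCP, but only if it differs from the current setting.
 */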
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001431void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1432{
1433 /* load old values */
1434 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1435
1436 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1437 /* leave all but MAX value */
1438 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1439
1440 /* set new MAX value */
1441 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1442 & FUNC_MF_CFG_MAX_BW_MASK;
1443
1444 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1445 }
1446}
1447
Dmitry Kravkovca924292011-06-14 01:33:08 +00001448/**
1449 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1450 *
1451 * @bp: driver handle
1452 * @nvecs: number of vectors to be released
1453 */
1454static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001455{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001456 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001457
Dmitry Kravkovca924292011-06-14 01:33:08 +00001458 if (nvecs == offset)
1459 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001460
1461 /* VFs don't have a default SB */
1462 if (IS_PF(bp)) {
1463 free_irq(bp->msix_table[offset].vector, bp->dev);
1464 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1465 bp->msix_table[offset].vector);
1466 offset++;
1467 }
Merav Sicron55c11942012-11-07 00:45:48 +00001468
1469 if (CNIC_SUPPORT(bp)) {
1470 if (nvecs == offset)
1471 return;
1472 offset++;
1473 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001474
Dmitry Kravkovca924292011-06-14 01:33:08 +00001475 for_each_eth_queue(bp, i) {
1476 if (nvecs == offset)
1477 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001478 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1479 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001480
1481 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001482 }
1483}
1484
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001485void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001486{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001487 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001488 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1489 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1490
1491 /* vfs don't have a default status block */
1492 if (IS_PF(bp))
1493 nvecs++;
1494
1495 bnx2x_free_msix_irqs(bp, nvecs);
1496 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001497 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001498 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001499}
1500
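/**
 * bnx2x_enable_msix - request MSI-X vectors from the OS.
 *
 * @bp:		driver handle
 *
 * Requests one vector per ETH queue, plus one for CNIC if supported and
 * one for the default status block on a PF. If fewer vectors are
 * granted, the number of ETH queues is reduced accordingly; as a last
 * resort a single MSI-X vector is used. Returns a negative value if
 * MSI-X cannot be enabled at all.
 */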
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001501int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001502{
Ariel Elior1ab44342013-01-01 05:22:23 +00001503 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001504
Ariel Elior1ab44342013-01-01 05:22:23 +00001505 /* VFs don't have a default status block */
1506 if (IS_PF(bp)) {
1507 bp->msix_table[msix_vec].entry = msix_vec;
1508 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1509 bp->msix_table[0].entry);
1510 msix_vec++;
1511 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001512
Merav Sicron55c11942012-11-07 00:45:48 +00001513	/* CNIC requires an MSI-X vector for itself */
1514 if (CNIC_SUPPORT(bp)) {
1515 bp->msix_table[msix_vec].entry = msix_vec;
1516 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1517 msix_vec, bp->msix_table[msix_vec].entry);
1518 msix_vec++;
1519 }
1520
Ariel Elior6383c0b2011-07-14 08:31:57 +00001521 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001522 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001523 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001524 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1525 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001526 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001527 }
1528
Ariel Elior1ab44342013-01-01 05:22:23 +00001529 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1530 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001531
Ariel Elior1ab44342013-01-01 05:22:23 +00001532 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001533
1534 /*
1535 * reconfigure number of tx/rx queues according to available
1536 * MSI-X vectors
1537 */
Merav Sicron55c11942012-11-07 00:45:48 +00001538 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001539		/* how many fewer vectors did we get? */
Ariel Elior1ab44342013-01-01 05:22:23 +00001540 int diff = msix_vec - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001541
Merav Sicron51c1a582012-03-18 10:33:38 +00001542 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001543
1544 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1545
1546 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001547 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1548 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001549 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001550 /*
1551 * decrease number of queues by number of unallocated entries
1552 */
Merav Sicron55c11942012-11-07 00:45:48 +00001553 bp->num_ethernet_queues -= diff;
1554 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001555
Merav Sicron51c1a582012-03-18 10:33:38 +00001556 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001557 bp->num_queues);
1558 } else if (rc > 0) {
1559 /* Get by with single vector */
1560 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1561 if (rc) {
1562 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1563 rc);
1564 goto no_msix;
1565 }
1566
1567 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1568 bp->flags |= USING_SINGLE_MSIX_FLAG;
1569
Merav Sicron55c11942012-11-07 00:45:48 +00001570 BNX2X_DEV_INFO("set number of queues to 1\n");
1571 bp->num_ethernet_queues = 1;
1572 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001573 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001574 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001575 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001576 }
1577
1578 bp->flags |= USING_MSIX_FLAG;
1579
1580 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001581
1582no_msix:
1583 /* fall to INTx if not enough memory */
1584 if (rc == -ENOMEM)
1585 bp->flags |= DISABLE_MSI_FLAG;
1586
1587 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001588}
1589
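/* Request the previously enabled MSI-X vectors: the slowpath vector
 * (PF only) and one vector per ETH fastpath, skipping the entry
 * reserved for CNIC. Already requested vectors are released on failure.
 */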
1590static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1591{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001592 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001593
Ariel Eliorad5afc82013-01-01 05:22:26 +00001594 /* no default status block for vf */
1595 if (IS_PF(bp)) {
1596 rc = request_irq(bp->msix_table[offset++].vector,
1597 bnx2x_msix_sp_int, 0,
1598 bp->dev->name, bp->dev);
1599 if (rc) {
1600 BNX2X_ERR("request sp irq failed\n");
1601 return -EBUSY;
1602 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001603 }
1604
Merav Sicron55c11942012-11-07 00:45:48 +00001605 if (CNIC_SUPPORT(bp))
1606 offset++;
1607
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001608 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001609 struct bnx2x_fastpath *fp = &bp->fp[i];
1610 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1611 bp->dev->name, i);
1612
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001613 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001614 bnx2x_msix_fp_int, 0, fp->name, fp);
1615 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001616 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1617 bp->msix_table[offset].vector, rc);
1618 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001619 return -EBUSY;
1620 }
1621
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001622 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001623 }
1624
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001625 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001626 if (IS_PF(bp)) {
1627 offset = 1 + CNIC_SUPPORT(bp);
1628 netdev_info(bp->dev,
1629 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1630 bp->msix_table[0].vector,
1631 0, bp->msix_table[offset].vector,
1632 i - 1, bp->msix_table[offset + i - 1].vector);
1633 } else {
1634 offset = CNIC_SUPPORT(bp);
1635 netdev_info(bp->dev,
1636 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1637 0, bp->msix_table[offset].vector,
1638 i - 1, bp->msix_table[offset + i - 1].vector);
1639 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001640 return 0;
1641}
1642
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001643int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001644{
1645 int rc;
1646
1647 rc = pci_enable_msi(bp->pdev);
1648 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001649 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001650 return -1;
1651 }
1652 bp->flags |= USING_MSI_FLAG;
1653
1654 return 0;
1655}
1656
1657static int bnx2x_req_irq(struct bnx2x *bp)
1658{
1659 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001660 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001661
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001662 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001663 flags = 0;
1664 else
1665 flags = IRQF_SHARED;
1666
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001667 if (bp->flags & USING_MSIX_FLAG)
1668 irq = bp->msix_table[0].vector;
1669 else
1670 irq = bp->pdev->irq;
1671
1672 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001673}
1674
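/* Request IRQs according to the interrupt mode selected earlier:
 * per-queue MSI-X vectors, or a single MSI-X/MSI/INTx interrupt.
 */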
Eric Dumazet1191cb82012-04-27 21:39:21 +00001675static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001676{
1677 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001678 if (bp->flags & USING_MSIX_FLAG &&
1679 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001680 rc = bnx2x_req_msix_irqs(bp);
1681 if (rc)
1682 return rc;
1683 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001684 rc = bnx2x_req_irq(bp);
1685 if (rc) {
1686 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1687 return rc;
1688 }
1689 if (bp->flags & USING_MSI_FLAG) {
1690 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001691 netdev_info(bp->dev, "using MSI IRQ %d\n",
1692 bp->dev->irq);
1693 }
1694 if (bp->flags & USING_MSIX_FLAG) {
1695 bp->dev->irq = bp->msix_table[0].vector;
1696 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1697 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001698 }
1699 }
1700
1701 return 0;
1702}
1703
Merav Sicron55c11942012-11-07 00:45:48 +00001704static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1705{
1706 int i;
1707
1708 for_each_rx_queue_cnic(bp, i)
1709 napi_enable(&bnx2x_fp(bp, i, napi));
1710}
1711
Eric Dumazet1191cb82012-04-27 21:39:21 +00001712static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001713{
1714 int i;
1715
Merav Sicron55c11942012-11-07 00:45:48 +00001716 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001717 napi_enable(&bnx2x_fp(bp, i, napi));
1718}
1719
Merav Sicron55c11942012-11-07 00:45:48 +00001720static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1721{
1722 int i;
1723
1724 for_each_rx_queue_cnic(bp, i)
1725 napi_disable(&bnx2x_fp(bp, i, napi));
1726}
1727
Eric Dumazet1191cb82012-04-27 21:39:21 +00001728static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001729{
1730 int i;
1731
Merav Sicron55c11942012-11-07 00:45:48 +00001732 for_each_eth_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001733 napi_disable(&bnx2x_fp(bp, i, napi));
1734}
1735
1736void bnx2x_netif_start(struct bnx2x *bp)
1737{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001738 if (netif_running(bp->dev)) {
1739 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001740 if (CNIC_LOADED(bp))
1741 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001742 bnx2x_int_enable(bp);
1743 if (bp->state == BNX2X_STATE_OPEN)
1744 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001745 }
1746}
1747
1748void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1749{
1750 bnx2x_int_disable_sync(bp, disable_hw);
1751 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001752 if (CNIC_LOADED(bp))
1753 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001754}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001755
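/* ndo_select_queue: steer FCoE and FIP frames to the dedicated FCoE
 * ring when CNIC is loaded; hash all other traffic onto the ETH rings.
 */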
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001756u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1757{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001758 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001759
Merav Sicron55c11942012-11-07 00:45:48 +00001760 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001761 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1762 u16 ether_type = ntohs(hdr->h_proto);
1763
1764 /* Skip VLAN tag if present */
1765 if (ether_type == ETH_P_8021Q) {
1766 struct vlan_ethhdr *vhdr =
1767 (struct vlan_ethhdr *)skb->data;
1768
1769 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1770 }
1771
1772 /* If ethertype is FCoE or FIP - use FCoE ring */
1773 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001774 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001775 }
Merav Sicron55c11942012-11-07 00:45:48 +00001776
David S. Miller823dcd22011-08-20 10:39:12 -07001777 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001778 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001779}
1780
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001782void bnx2x_set_num_queues(struct bnx2x *bp)
1783{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001784 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001785 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001786
Barak Witkowskia3348722012-04-23 03:04:46 +00001787 /* override in STORAGE SD modes */
1788 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001789 bp->num_ethernet_queues = 1;
1790
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001791 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001792 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1793 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001794
1795 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001796}
1797
David S. Miller823dcd22011-08-20 10:39:12 -07001798/**
1799 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1800 *
1801 * @bp: Driver handle
1802 *
1803 * We currently support at most 16 Tx queues for each CoS, thus we will
1804 * allocate a multiple of 16 for ETH L2 rings, according to the value of
1805 * bp->max_cos.
1806 *
1807 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1808 * index after all ETH L2 indices.
1809 *
1810 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1811 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1812 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1813 *
1814 * The proper configuration of skb->queue_mapping is handled by
1815 * bnx2x_select_queue() and __skb_tx_hash().
1816 *
1817 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1818 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1819 */
Merav Sicron55c11942012-11-07 00:45:48 +00001820static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001821{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001822 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001823
Merav Sicron65565882012-06-19 07:48:26 +00001824 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001825 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001826
1827	/* account for the FCoE queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001828 if (include_cnic && !NO_FCOE(bp)) {
1829 rx++;
1830 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001831 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001832
1833 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1834 if (rc) {
1835 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1836 return rc;
1837 }
1838 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1839 if (rc) {
1840 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1841 return rc;
1842 }
1843
Merav Sicron51c1a582012-03-18 10:33:38 +00001844 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001845 tx, rx);
1846
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001847 return rc;
1848}
1849
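/* Derive the Rx buffer size of each queue from the MTU (the FCoE ring
 * always uses a mini-jumbo MTU) and decide whether Rx buffers can be
 * allocated from page fragments.
 */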
Eric Dumazet1191cb82012-04-27 21:39:21 +00001850static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001851{
1852 int i;
1853
1854 for_each_queue(bp, i) {
1855 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001856 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001857
1858 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1859 if (IS_FCOE_IDX(i))
1860 /*
1861			 * Although no IP frames are expected to arrive on
1862			 * this ring, we still want to add an
1863 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1864 * overrun attack.
1865 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001866 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001867 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001868 mtu = bp->dev->mtu;
1869 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1870 IP_HEADER_ALIGNMENT_PADDING +
1871 ETH_OVREHEAD +
1872 mtu +
1873 BNX2X_FW_RX_ALIGN_END;
1874		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
Eric Dumazetd46d1322012-12-10 12:16:06 +00001875 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1876 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1877 else
1878 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001879 }
1880}
1881
Eric Dumazet1191cb82012-04-27 21:39:21 +00001882static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001883{
1884 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001885 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1886
Dmitry Kravkov96305232012-04-03 18:41:30 +00001887	/* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001888 * enabled
1889 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001890 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1891 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001892 bp->fp->cl_id +
1893 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001894
1895 /*
1896 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1897	 * per-port, so if explicit configuration is needed, do it only
1898 * for a PMF.
1899 *
1900 * For 57712 and newer on the other hand it's a per-function
1901 * configuration.
1902 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001903 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001904}
1905
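/**
 * bnx2x_config_rss_pf - send an RSS configuration ramrod.
 *
 * @bp:		driver handle
 * @rss_obj:	RSS object to configure
 * @config_hash: if true, also generate and program new RSS keys
 *
 * Builds the RSS ramrod parameters (mode, hash types and indirection
 * table) from the given object and sends the configuration ramrod.
 */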
Dmitry Kravkov96305232012-04-03 18:41:30 +00001906int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001907 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001908{
Yuval Mintz3b603062012-03-18 10:33:39 +00001909 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001910
1911 /* Although RSS is meaningless when there is a single HW queue we
1912 * still need it enabled in order to have HW Rx hash generated.
1913 *
1914 * if (!is_eth_multi(bp))
1915 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1916 */
1917
Dmitry Kravkov96305232012-04-03 18:41:30 +00001918 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001919
1920 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1921
Dmitry Kravkov96305232012-04-03 18:41:30 +00001922 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001923
Dmitry Kravkov96305232012-04-03 18:41:30 +00001924 /* RSS configuration */
1925 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1926 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1927 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1928 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001929 if (rss_obj->udp_rss_v4)
1930 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1931 if (rss_obj->udp_rss_v6)
1932 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001933
Dmitry Kravkov96305232012-04-03 18:41:30 +00001934 /* Hash bits */
1935 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001936
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001937 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001938
Dmitry Kravkov96305232012-04-03 18:41:30 +00001939 if (config_hash) {
1940 /* RSS keys */
Akinobu Mita8376d0b2012-12-17 16:04:28 -08001941 prandom_bytes(params.rss_key, sizeof(params.rss_key));
Dmitry Kravkov96305232012-04-03 18:41:30 +00001942 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001943 }
1944
1945 return bnx2x_config_rss(bp, &params);
1946}
1947
Eric Dumazet1191cb82012-04-27 21:39:21 +00001948static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001949{
Yuval Mintz3b603062012-03-18 10:33:39 +00001950 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001951
1952 /* Prepare parameters for function state transitions */
1953 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1954
1955 func_params.f_obj = &bp->func_obj;
1956 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1957
1958 func_params.params.hw_init.load_phase = load_code;
1959
1960 return bnx2x_func_state_change(bp, &func_params);
1961}
1962
1963/*
1964 * Cleans the objects that have internal lists without sending
1965 * ramrods. Should be run when interrupts are disabled.
1966 */
1967static void bnx2x_squeeze_objects(struct bnx2x *bp)
1968{
1969 int rc;
1970 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00001971 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00001972 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001973
1974 /***************** Cleanup MACs' object first *************************/
1975
1976 /* Wait for completion of requested */
1977 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1978 /* Perform a dry cleanup */
1979 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1980
1981 /* Clean ETH primary MAC */
1982 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00001983 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001984 &ramrod_flags);
1985 if (rc != 0)
1986 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1987
1988 /* Cleanup UC list */
1989 vlan_mac_flags = 0;
1990 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1991 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1992 &ramrod_flags);
1993 if (rc != 0)
1994 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1995
1996 /***************** Now clean mcast object *****************************/
1997 rparam.mcast_obj = &bp->mcast_obj;
1998 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1999
2000 /* Add a DEL command... */
2001 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2002 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002003 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2004 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002005
2006 /* ...and wait until all pending commands are cleared */
2007 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2008 while (rc != 0) {
2009 if (rc < 0) {
2010 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2011 rc);
2012 return;
2013 }
2014
2015 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2016 }
2017}
2018
2019#ifndef BNX2X_STOP_ON_ERROR
2020#define LOAD_ERROR_EXIT(bp, label) \
2021 do { \
2022 (bp)->state = BNX2X_STATE_ERROR; \
2023 goto label; \
2024 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002025
2026#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2027 do { \
2028 bp->cnic_loaded = false; \
2029 goto label; \
2030 } while (0)
2031#else /* BNX2X_STOP_ON_ERROR */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002032#define LOAD_ERROR_EXIT(bp, label) \
2033 do { \
2034 (bp)->state = BNX2X_STATE_ERROR; \
2035 (bp)->panic = 1; \
2036 return -EBUSY; \
2037 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002038#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2039 do { \
2040 bp->cnic_loaded = false; \
2041 (bp)->panic = 1; \
2042 return -EBUSY; \
2043 } while (0)
2044#endif /* BNX2X_STOP_ON_ERROR */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002045
Ariel Eliorad5afc82013-01-01 05:22:26 +00002046static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002047{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002048 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2049 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2051}
Yuval Mintz452427b2012-03-26 20:47:07 +00002052
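/* Allocate a single DMA buffer holding both the FW statistics request
 * (stats_query_header plus command groups, with headroom for VF
 * queries) and the statistics data, and set the shortcut pointers
 * into it.
 */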
Ariel Eliorad5afc82013-01-01 05:22:26 +00002053static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2054{
Ariel Elior8db573b2013-01-01 05:22:37 +00002055 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002056 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002057
Ariel Eliorad5afc82013-01-01 05:22:26 +00002058 /* number of queues for statistics is number of eth queues + FCoE */
2059 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002060
Ariel Eliorad5afc82013-01-01 05:22:26 +00002061 /* Total number of FW statistics requests =
2062 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2063 * and fcoe l2 queue) stats + num of queues (which includes another 1
2064 * for fcoe l2 queue if applicable)
2065 */
2066 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2067
Ariel Elior8db573b2013-01-01 05:22:37 +00002068 /* vf stats appear in the request list, but their data is allocated by
2069 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2070 * it is used to determine where to place the vf stats queries in the
2071 * request struct
2072 */
2073 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002074 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002075
Ariel Eliorad5afc82013-01-01 05:22:26 +00002076 /* Request is built from stats_query_header and an array of
2077 * stats_query_cmd_group each of which contains
2078 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2079 * configured in the stats_query_header.
2080 */
2081 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002082 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2083 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002084 1 : 0));
2085
Ariel Elior8db573b2013-01-01 05:22:37 +00002086 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2087 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002088 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2089 num_groups * sizeof(struct stats_query_cmd_group);
2090
2091 /* Data for statistics requests + stats_counter
2092 * stats_counter holds per-STORM counters that are incremented
2093 * when STORM has finished with the current request.
2094 * memory for FCoE offloaded statistics are counted anyway,
2095 * even if they will not be sent.
2096 * VF stats are not accounted for here as the data of VF stats is stored
2097 * in memory allocated by the VF, not here.
2098 */
2099 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2100 sizeof(struct per_pf_stats) +
2101 sizeof(struct fcoe_statistics_params) +
2102 sizeof(struct per_queue_stats) * num_queue_stats +
2103 sizeof(struct stats_counter);
2104
2105 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2106 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2107
2108 /* Set shortcuts */
2109 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2110 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2111 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2112 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2113 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2114 bp->fw_stats_req_sz;
2115
2116	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2117 U64_HI(bp->fw_stats_req_mapping),
2118 U64_LO(bp->fw_stats_req_mapping));
2119	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2120 U64_HI(bp->fw_stats_data_mapping),
2121 U64_LO(bp->fw_stats_data_mapping));
2122 return 0;
2123
2124alloc_mem_err:
2125 bnx2x_free_fw_stats_mem(bp);
2126 BNX2X_ERR("Can't allocate FW stats memory\n");
2127 return -ENOMEM;
2128}
2129
2130/* send load request to mcp and analyze response */
2131static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2132{
2133 /* init fw_seq */
2134 bp->fw_seq =
2135 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2136 DRV_MSG_SEQ_NUMBER_MASK);
2137 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2138
2139 /* Get current FW pulse sequence */
2140 bp->fw_drv_pulse_wr_seq =
2141 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2142 DRV_PULSE_SEQ_MASK);
2143 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2144
2145 /* load request */
2146 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2147 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2148
2149 /* if mcp fails to respond we must abort */
2150 if (!(*load_code)) {
2151 BNX2X_ERR("MCP response failure, aborting\n");
2152 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002153 }
2154
Ariel Eliorad5afc82013-01-01 05:22:26 +00002155 /* If mcp refused (e.g. other port is in diagnostic mode) we
2156 * must abort
2157 */
2158 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2159 BNX2X_ERR("MCP refused load request, aborting\n");
2160 return -EBUSY;
2161 }
2162 return 0;
2163}
2164
2165/* check whether another PF has already loaded FW to chip. In
2166 * virtualized environments a pf from another VM may have already
2167 * initialized the device including loading FW
2168 */
2169int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2170{
2171 /* is another pf loaded on this engine? */
2172 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2173 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2174 /* build my FW version dword */
2175 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2176 (BCM_5710_FW_MINOR_VERSION << 8) +
2177 (BCM_5710_FW_REVISION_VERSION << 16) +
2178 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2179
2180 /* read loaded FW from chip */
2181 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2182
2183 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2184 loaded_fw, my_fw);
2185
2186 /* abort nic load if version mismatch */
2187 if (my_fw != loaded_fw) {
2188 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2189 loaded_fw, my_fw);
2190 return -EBUSY;
2191 }
2192 }
2193 return 0;
2194}
2195
2196/* returns the "mcp load_code" according to global load_count array */
2197static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2198{
2199 int path = BP_PATH(bp);
2200
2201 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2202 path, load_count[path][0], load_count[path][1],
2203 load_count[path][2]);
2204 load_count[path][0]++;
2205 load_count[path][1 + port]++;
2206 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2207 path, load_count[path][0], load_count[path][1],
2208 load_count[path][2]);
2209 if (load_count[path][0] == 1)
2210 return FW_MSG_CODE_DRV_LOAD_COMMON;
2211 else if (load_count[path][1 + port] == 1)
2212 return FW_MSG_CODE_DRV_LOAD_PORT;
2213 else
2214 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2215}
2216
2217/* mark PMF if applicable */
2218static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2219{
2220 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2221 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2222 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2223 bp->port.pmf = 1;
2224 /* We need the barrier to ensure the ordering between the
2225 * writing to bp->port.pmf here and reading it from the
2226 * bnx2x_periodic_task().
2227 */
2228 smp_mb();
2229 } else {
2230 bp->port.pmf = 0;
2231 }
2232
2233 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2234}
2235
2236static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2237{
2238 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2239 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2240 (bp->common.shmem2_base)) {
2241 if (SHMEM2_HAS(bp, dcc_support))
2242 SHMEM2_WR(bp, dcc_support,
2243 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2244 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2245 if (SHMEM2_HAS(bp, afex_driver_support))
2246 SHMEM2_WR(bp, afex_driver_support,
2247 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2248 }
2249
2250 /* Set AFEX default VLAN tag to an invalid value */
2251 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002252}
2253
Eric Dumazet1191cb82012-04-27 21:39:21 +00002254/**
2255 * bnx2x_bz_fp - zero content of the fastpath structure.
2256 *
2257 * @bp: driver handle
2258 * @index: fastpath index to be zeroed
2259 *
2260 * Makes sure the contents of bp->fp[index].napi are kept
2261 * intact.
2262 */
2263static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2264{
2265 struct bnx2x_fastpath *fp = &bp->fp[index];
Barak Witkowski15192a82012-06-19 07:48:28 +00002266 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2267
Merav Sicron65565882012-06-19 07:48:26 +00002268 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002269 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002270 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002271 /* bzero bnx2x_fastpath contents */
Barak Witkowski15192a82012-06-19 07:48:28 +00002272 if (bp->stats_init) {
2273 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002274 memset(fp, 0, sizeof(*fp));
Barak Witkowski15192a82012-06-19 07:48:28 +00002275 } else {
Eric Dumazet1191cb82012-04-27 21:39:21 +00002276 /* Keep Queue statistics */
2277 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2278 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2279
2280 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2281 GFP_KERNEL);
2282 if (tmp_eth_q_stats)
Barak Witkowski15192a82012-06-19 07:48:28 +00002283 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
Eric Dumazet1191cb82012-04-27 21:39:21 +00002284 sizeof(struct bnx2x_eth_q_stats));
2285
2286 tmp_eth_q_stats_old =
2287 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2288 GFP_KERNEL);
2289 if (tmp_eth_q_stats_old)
Barak Witkowski15192a82012-06-19 07:48:28 +00002290 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00002291 sizeof(struct bnx2x_eth_q_stats_old));
2292
Barak Witkowski15192a82012-06-19 07:48:28 +00002293 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002294 memset(fp, 0, sizeof(*fp));
2295
2296 if (tmp_eth_q_stats) {
Barak Witkowski15192a82012-06-19 07:48:28 +00002297 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2298 sizeof(struct bnx2x_eth_q_stats));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002299 kfree(tmp_eth_q_stats);
2300 }
2301
2302 if (tmp_eth_q_stats_old) {
Barak Witkowski15192a82012-06-19 07:48:28 +00002303 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
Eric Dumazet1191cb82012-04-27 21:39:21 +00002304 sizeof(struct bnx2x_eth_q_stats_old));
2305 kfree(tmp_eth_q_stats_old);
2306 }
2308 }
2309
2310 /* Restore the NAPI object as it has been already initialized */
2311 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002312 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002313 fp->bp = bp;
2314 fp->index = index;
2315 if (IS_ETH_FP(fp))
2316 fp->max_cos = bp->max_cos;
2317 else
2318 /* Special queues support only one CoS */
2319 fp->max_cos = 1;
2320
Merav Sicron65565882012-06-19 07:48:26 +00002321 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002322 if (IS_FCOE_FP(fp))
2323 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002324 if (IS_ETH_FP(fp))
2325 for_each_cos_in_tx_queue(fp, cos)
2326 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2327 BNX2X_NUM_ETH_QUEUES(bp) + index];
2328
Eric Dumazet1191cb82012-04-27 21:39:21 +00002329 /*
2330	 * Set the TPA flag for each queue. The TPA flag determines the minimal
2331	 * queue size, so it must be set prior to queue memory allocation.
2332 */
2333 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2334 (bp->flags & GRO_ENABLE_FLAG &&
2335 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2336 if (bp->flags & TPA_ENABLE_FLAG)
2337 fp->mode = TPA_MODE_LRO;
2338 else if (bp->flags & GRO_ENABLE_FLAG)
2339 fp->mode = TPA_MODE_GRO;
2340
Eric Dumazet1191cb82012-04-27 21:39:21 +00002341 /* We don't want TPA on an FCoE L2 ring */
2342 if (IS_FCOE_FP(fp))
2343 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002344}
2345
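/* Bring up the CNIC (FCoE/iSCSI) part of the device: allocate its
 * memory and fastpath resources, enable its NAPI objects and IRQ info,
 * initialize its part of the HW, set up the CNIC queues (PF only) and
 * notify the CNIC driver when done.
 */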
2346int bnx2x_load_cnic(struct bnx2x *bp)
2347{
2348 int i, rc, port = BP_PORT(bp);
2349
2350 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2351
2352 mutex_init(&bp->cnic_mutex);
2353
Ariel Eliorad5afc82013-01-01 05:22:26 +00002354 if (IS_PF(bp)) {
2355 rc = bnx2x_alloc_mem_cnic(bp);
2356 if (rc) {
2357 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2358 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2359 }
Merav Sicron55c11942012-11-07 00:45:48 +00002360 }
2361
2362 rc = bnx2x_alloc_fp_mem_cnic(bp);
2363 if (rc) {
2364 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2365 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2366 }
2367
2368 /* Update the number of queues with the cnic queues */
2369 rc = bnx2x_set_real_num_queues(bp, 1);
2370 if (rc) {
2371 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2372 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2373 }
2374
2375 /* Add all CNIC NAPI objects */
2376 bnx2x_add_all_napi_cnic(bp);
2377 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2378 bnx2x_napi_enable_cnic(bp);
2379
2380 rc = bnx2x_init_hw_func_cnic(bp);
2381 if (rc)
2382 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2383
2384 bnx2x_nic_init_cnic(bp);
2385
Ariel Eliorad5afc82013-01-01 05:22:26 +00002386 if (IS_PF(bp)) {
2387 /* Enable Timer scan */
2388 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002389
Ariel Eliorad5afc82013-01-01 05:22:26 +00002390 /* setup cnic queues */
2391 for_each_cnic_queue(bp, i) {
2392 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2393 if (rc) {
2394 BNX2X_ERR("Queue setup failed\n");
2395 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2396 }
Merav Sicron55c11942012-11-07 00:45:48 +00002397 }
2398 }
2399
2400 /* Initialize Rx filter. */
2401 netif_addr_lock_bh(bp->dev);
2402 bnx2x_set_rx_mode(bp->dev);
2403 netif_addr_unlock_bh(bp->dev);
2404
2405 /* re-read iscsi info */
2406 bnx2x_get_iscsi_info(bp);
2407 bnx2x_setup_cnic_irq_info(bp);
2408 bnx2x_setup_cnic_info(bp);
2409 bp->cnic_loaded = true;
2410 if (bp->state == BNX2X_STATE_OPEN)
2411 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2412
2414	DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2415
2416 return 0;
2417
2418#ifndef BNX2X_STOP_ON_ERROR
2419load_error_cnic2:
2420 /* Disable Timer scan */
2421 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2422
2423load_error_cnic1:
2424 bnx2x_napi_disable_cnic(bp);
2425 /* Update the number of queues without the cnic queues */
2426 rc = bnx2x_set_real_num_queues(bp, 0);
2427 if (rc)
2428 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2429load_error_cnic0:
2430 BNX2X_ERR("CNIC-related load failed\n");
2431 bnx2x_free_fp_mem_cnic(bp);
2432 bnx2x_free_mem_cnic(bp);
2433 return rc;
2434#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002435}
2436
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002438/* must be called with rtnl_lock */
2439int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2440{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002441 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002442 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002443
Merav Sicron55c11942012-11-07 00:45:48 +00002444 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2445 DP(NETIF_MSG_IFUP,
2446 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2447
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002448#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002449 if (unlikely(bp->panic)) {
2450 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002451 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002452 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002453#endif
2454
2455 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2456
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002457 /* Set the initial link reported state to link down */
2458 bnx2x_acquire_phy_lock(bp);
2459 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2460 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2461 &bp->last_reported_link.link_report_flags);
2462 bnx2x_release_phy_lock(bp);
2463
Ariel Eliorad5afc82013-01-01 05:22:26 +00002464 if (IS_PF(bp))
2465 /* must be called before memory allocation and HW init */
2466 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002467
Ariel Elior6383c0b2011-07-14 08:31:57 +00002468 /*
2469	 * Zero fastpath structures while preserving invariants that are allocated
2470	 * only once: the napi struct, the fp index, max_cos and the bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002471 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002472 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002473	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002474 for_each_queue(bp, i)
2475 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002476 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2477 bp->num_cnic_queues) *
2478 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002479
Merav Sicron55c11942012-11-07 00:45:48 +00002480 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002481
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002482 /* Set the receive queues buffer size */
2483 bnx2x_set_rx_buf_size(bp);
2484
Ariel Eliorad5afc82013-01-01 05:22:26 +00002485 if (IS_PF(bp)) {
2486 rc = bnx2x_alloc_mem(bp);
2487 if (rc) {
2488 BNX2X_ERR("Unable to allocate bp memory\n");
2489 return rc;
2490 }
2491 }
2492
2493	/* Allocate memory for FW statistics */
2494 if (bnx2x_alloc_fw_stats_mem(bp))
2495 LOAD_ERROR_EXIT(bp, load_error0);
2496
2497	/* Needs to be done after alloc mem, since it's self-adjusting to the
2498	 * amount of memory available for RSS queues
2499 */
2500 rc = bnx2x_alloc_fp_mem(bp);
2501 if (rc) {
2502 BNX2X_ERR("Unable to allocate memory for fps\n");
2503 LOAD_ERROR_EXIT(bp, load_error0);
2504 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002505
Ariel Elior8d9ac292013-01-01 05:22:27 +00002506 /* request pf to initialize status blocks */
2507 if (IS_VF(bp)) {
2508 rc = bnx2x_vfpf_init(bp);
2509 if (rc)
2510 LOAD_ERROR_EXIT(bp, load_error0);
2511 }
2512
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002513 /* As long as bnx2x_alloc_mem() may possibly update
2514 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002515 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002516 */
Merav Sicron55c11942012-11-07 00:45:48 +00002517 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002518 if (rc) {
2519 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002520 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002521 }
2522
Ariel Elior6383c0b2011-07-14 08:31:57 +00002523	/* Configure multi-CoS mappings in the kernel.
2524	 * This configuration may be overridden by a multi-class queue
2525	 * discipline or by a DCBX negotiation result.
2526 */
2527 bnx2x_setup_tc(bp->dev, bp->max_cos);
2528
Merav Sicron26614ba2012-08-27 03:26:19 +00002529 /* Add all NAPI objects */
2530 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002531 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002532 bnx2x_napi_enable(bp);
2533
Ariel Eliorad5afc82013-01-01 05:22:26 +00002534 if (IS_PF(bp)) {
2535 /* set pf load just before approaching the MCP */
2536 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002537
Ariel Eliorad5afc82013-01-01 05:22:26 +00002538 /* if mcp exists send load request and analyze response */
2539 if (!BP_NOMCP(bp)) {
2540 /* attempt to load pf */
2541 rc = bnx2x_nic_load_request(bp, &load_code);
2542 if (rc)
2543 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002544
Ariel Eliorad5afc82013-01-01 05:22:26 +00002545 /* what did mcp say? */
2546 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2547 if (rc) {
2548 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002549 LOAD_ERROR_EXIT(bp, load_error2);
2550 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002551 } else {
2552 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002553 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002554
Ariel Eliorad5afc82013-01-01 05:22:26 +00002555 /* mark pmf if applicable */
2556 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002557
Ariel Eliorad5afc82013-01-01 05:22:26 +00002558 /* Init Function state controlling object */
2559 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002560
Ariel Eliorad5afc82013-01-01 05:22:26 +00002561 /* Initialize HW */
2562 rc = bnx2x_init_hw(bp, load_code);
2563 if (rc) {
2564 BNX2X_ERR("HW init failed, aborting\n");
2565 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2566 LOAD_ERROR_EXIT(bp, load_error2);
2567 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002568 }
2569
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002570 /* Connect to IRQs */
2571 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002572 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002573 BNX2X_ERR("setup irqs failed\n");
2574 if (IS_PF(bp))
2575 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002576 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002577 }
2578
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002579 /* Setup NIC internals and enable interrupts */
2580 bnx2x_nic_init(bp, load_code);
2581
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002582 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002583 if (IS_PF(bp)) {
2584 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002585 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002586
Ariel Eliorad5afc82013-01-01 05:22:26 +00002587 /* Set AFEX default VLAN tag to an invalid value */
2588 bp->afex_def_vlan_tag = -1;
2589 bnx2x_nic_load_afex_dcc(bp, load_code);
2590 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2591 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002592 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002593 BNX2X_ERR("Function start failed!\n");
2594 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2595
Merav Sicron55c11942012-11-07 00:45:48 +00002596 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002597 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002598
Ariel Eliorad5afc82013-01-01 05:22:26 +00002599 /* Send LOAD_DONE command to MCP */
2600 if (!BP_NOMCP(bp)) {
2601 load_code = bnx2x_fw_command(bp,
2602 DRV_MSG_CODE_LOAD_DONE, 0);
2603 if (!load_code) {
2604 BNX2X_ERR("MCP response failure, aborting\n");
2605 rc = -EBUSY;
2606 LOAD_ERROR_EXIT(bp, load_error3);
2607 }
2608 }
2609
2610 /* setup the leading queue */
2611 rc = bnx2x_setup_leading(bp);
2612 if (rc) {
2613 BNX2X_ERR("Setup leading failed!\n");
2614 LOAD_ERROR_EXIT(bp, load_error3);
2615 }
2616
2617 /* set up the rest of the queues */
2618 for_each_nondefault_eth_queue(bp, i) {
2619 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2620 if (rc) {
2621 BNX2X_ERR("Queue setup failed\n");
2622 LOAD_ERROR_EXIT(bp, load_error3);
2623 }
2624 }
2625
2626 /* setup rss */
2627 rc = bnx2x_init_rss_pf(bp);
2628 if (rc) {
2629 BNX2X_ERR("PF RSS init failed\n");
2630 LOAD_ERROR_EXIT(bp, load_error3);
2631 }
Ariel Elior8d9ac292013-01-01 05:22:27 +00002632
2633 } else { /* vf */
2634 for_each_eth_queue(bp, i) {
2635 rc = bnx2x_vfpf_setup_q(bp, i);
2636 if (rc) {
2637 BNX2X_ERR("Queue setup failed\n");
2638 LOAD_ERROR_EXIT(bp, load_error3);
2639 }
2640 }
Merav Sicron51c1a582012-03-18 10:33:38 +00002641 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002642
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002643 /* Now when Clients are configured we are ready to work */
2644 bp->state = BNX2X_STATE_OPEN;
2645
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002646 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002647 if (IS_PF(bp))
2648 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002649 else /* vf */
2650 rc = bnx2x_vfpf_set_mac(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002651 if (rc) {
2652 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002653 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002654 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002655
Ariel Eliorad5afc82013-01-01 05:22:26 +00002656 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002657 bnx2x_update_max_mf_config(bp, bp->pending_max);
2658 bp->pending_max = 0;
2659 }
2660
Ariel Eliorad5afc82013-01-01 05:22:26 +00002661 if (bp->port.pmf) {
2662 rc = bnx2x_initial_phy_init(bp, load_mode);
2663 if (rc)
2664 LOAD_ERROR_EXIT(bp, load_error3);
2665 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002666 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002667
2668 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002669
2670 /* Initialize Rx filter. */
2671 netif_addr_lock_bh(bp->dev);
2672 bnx2x_set_rx_mode(bp->dev);
2673 netif_addr_unlock_bh(bp->dev);
2674
2675 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002676 switch (load_mode) {
2677 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002678		/* Tx queues should only be re-enabled */
2679 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002680 break;
2681
2682 case LOAD_OPEN:
2683 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002684 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002685 break;
2686
2687 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002688 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002689 bp->state = BNX2X_STATE_DIAG;
2690 break;
2691
2692 default:
2693 break;
2694 }
2695
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002696 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002697 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002698 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002699 bnx2x__link_status_update(bp);
2700
2701 /* start the timer */
2702 mod_timer(&bp->timer, jiffies + bp->current_interval);
2703
Merav Sicron55c11942012-11-07 00:45:48 +00002704 if (CNIC_ENABLED(bp))
2705 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002706
Ariel Eliorad5afc82013-01-01 05:22:26 +00002707 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2708 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002709 u32 val;
2710 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2711 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2712 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2713 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2714 }
2715
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002716 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002717 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002718 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002719 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002720 return -EBUSY;
2721 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002722
Barak Witkowski98768792012-06-19 07:48:31 +00002723 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2724 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2725 bnx2x_dcbx_init(bp, false);
2726
Merav Sicron55c11942012-11-07 00:45:48 +00002727	DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2728
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002729 return 0;
2730
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002731#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002732load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002733 if (IS_PF(bp)) {
2734 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002735
Ariel Eliorad5afc82013-01-01 05:22:26 +00002736 /* Clean queueable objects */
2737 bnx2x_squeeze_objects(bp);
2738 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002739
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002740 /* Free SKBs, SGEs, TPA pool and driver internals */
2741 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002742 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002743 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002744
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002745 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002746 bnx2x_free_irq(bp);
2747load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002748 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002749 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2750 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2751 }
2752
2753 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002754load_error1:
2755 bnx2x_napi_disable(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002756
Ariel Elior889b9af2012-01-26 06:01:51 +00002757 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002758 if (IS_PF(bp))
2759 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002760load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002761 bnx2x_free_fp_mem(bp);
2762 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002763 bnx2x_free_mem(bp);
2764
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002765 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002766#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002767}
2768
Ariel Eliorad5afc82013-01-01 05:22:26 +00002769static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2770{
 2771	int rc = 0;
	u8 cos, i;
 2772
 2773	/* Wait until tx fastpath tasks complete */
 2774	for_each_tx_queue(bp, i) {
 2775		struct bnx2x_fastpath *fp = &bp->fp[i];
 2776
 2777		for_each_cos_in_tx_queue(fp, cos) {
 2778			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
 2779			if (rc)
 2780				return rc;
		}
2781 }
2782 return 0;
2783}
2784
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002785/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002786int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002787{
2788 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002789 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002790
Merav Sicron55c11942012-11-07 00:45:48 +00002791	DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2792
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002793 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002794 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002795 u32 val;
2796 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2797 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2798 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2799 }
2800
Ariel Eliorad5afc82013-01-01 05:22:26 +00002801 if (IS_PF(bp) &&
2802 (bp->state == BNX2X_STATE_CLOSED ||
2803 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002804 /* We can get here if the driver has been unloaded
2805 * during parity error recovery and is either waiting for a
2806 * leader to complete or for other functions to unload and
2807 * then ifdown has been issued. In this case we want to
 2808		 * unload and let other functions complete a recovery
2809 * process.
2810 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002811 bp->recovery_state = BNX2X_RECOVERY_DONE;
2812 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002813 bnx2x_release_leader_lock(bp);
2814 smp_mb();
2815
Merav Sicron51c1a582012-03-18 10:33:38 +00002816		DP(NETIF_MSG_IFDOWN, "Releasing leadership...\n");
2817 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002818 return -EINVAL;
2819 }
2820
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002821 /*
 2822	 * It's important to set bp->state to a value different from
2823 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2824 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2825 */
2826 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2827 smp_mb();
2828
Merav Sicron55c11942012-11-07 00:45:48 +00002829 if (CNIC_LOADED(bp))
2830 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2831
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002832 /* Stop Tx */
2833 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002834 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002835
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002836 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002837
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002838 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002839
Ariel Eliorad5afc82013-01-01 05:22:26 +00002840 if (IS_PF(bp)) {
2841 /* Set ALWAYS_ALIVE bit in shmem */
2842 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2843 bnx2x_drv_pulse(bp);
2844 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2845 bnx2x_save_statistics(bp);
2846 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002847
Ariel Eliorad5afc82013-01-01 05:22:26 +00002848 /* wait till consumers catch up with producers in all queues */
2849 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002850
Ariel Elior9b176b62013-01-01 05:22:28 +00002851	/* if VF, indicate to PF that this function is going down (PF will
 2852	 * delete sp elements and clear initializations)
2853 */
2854 if (IS_VF(bp))
2855 bnx2x_vfpf_close_vf(bp);
2856 else if (unload_mode != UNLOAD_RECOVERY)
2857 /* if this is a normal/close unload need to clean up chip*/
Yuval Mintz5d07d862012-09-13 02:56:21 +00002858 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002859 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002860 /* Send the UNLOAD_REQUEST to the MCP */
2861 bnx2x_send_unload_req(bp, unload_mode);
2862
2863 /*
2864 * Prevent transactions to host from the functions on the
2865 * engine that doesn't reset global blocks in case of global
 2866		 * attention once global blocks are reset and gates are opened
 2867		 * (i.e. the engine whose leader will perform the recovery
2868 * last).
2869 */
2870 if (!CHIP_IS_E1x(bp))
2871 bnx2x_pf_disable(bp);
2872
2873 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002874 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002875 /* Delete all NAPI objects */
2876 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002877 if (CNIC_LOADED(bp))
2878 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002879 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002880 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002881
2882 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002883 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002884 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002885
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002886 /*
 2887	 * At this stage no more interrupts will arrive so we may safely clean
2888 * the queueable objects here in case they failed to get cleaned so far.
2889 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002890 if (IS_PF(bp))
2891 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002892
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002893 /* There should be no more pending SP commands at this stage */
2894 bp->sp_state = 0;
2895
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002896 bp->port.pmf = 0;
2897
2898 /* Free SKBs, SGEs, TPA pool and driver internals */
2899 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002900 if (CNIC_LOADED(bp))
2901 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002902 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002903 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002904
Ariel Eliorad5afc82013-01-01 05:22:26 +00002905 bnx2x_free_fp_mem(bp);
2906 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00002907 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002908
Ariel Eliorad5afc82013-01-01 05:22:26 +00002909 if (IS_PF(bp)) {
2910 bnx2x_free_mem(bp);
2911 if (CNIC_LOADED(bp))
2912 bnx2x_free_mem_cnic(bp);
2913 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002914 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00002915 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002916
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002917 /* Check if there are pending parity attentions. If there are - set
2918 * RECOVERY_IN_PROGRESS.
2919 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002920 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002921 bnx2x_set_reset_in_progress(bp);
2922
2923 /* Set RESET_IS_GLOBAL if needed */
2924 if (global)
2925 bnx2x_set_reset_global(bp);
2926 }
 2927
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002929 /* The last driver must disable a "close the gate" if there is no
2930 * parity attention or "process kill" pending.
2931 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002932 if (IS_PF(bp) &&
2933 !bnx2x_clear_pf_load(bp) &&
2934 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002935 bnx2x_disable_close_the_gate(bp);
2936
Merav Sicron55c11942012-11-07 00:45:48 +00002937	DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
2938
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002939 return 0;
2940}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002941
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002942int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2943{
2944 u16 pmcsr;
2945
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002946 /* If there is no power capability, silently succeed */
2947 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002948 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002949 return 0;
2950 }
2951
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002952 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2953
2954 switch (state) {
2955 case PCI_D0:
2956 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2957 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2958 PCI_PM_CTRL_PME_STATUS));
2959
2960 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2961 /* delay required during transition out of D3hot */
2962 msleep(20);
2963 break;
2964
2965 case PCI_D3hot:
 2966		/* If there are other clients above, don't
 2967		 * shut down the power */
2968 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2969 return 0;
2970 /* Don't shut down the power for emulation and FPGA */
2971 if (CHIP_REV_IS_SLOW(bp))
2972 return 0;
2973
2974 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
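		/* power-state field value 3 in PMCSR selects D3hot (PCI PM spec) */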
2975 pmcsr |= 3;
2976
2977 if (bp->wol)
2978 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2979
2980 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2981 pmcsr);
2982
2983 /* No more memory access after this point until
2984 * device is brought back to D0.
2985 */
2986 break;
2987
2988 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00002989 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002990 return -EINVAL;
2991 }
2992 return 0;
2993}
2994
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002995/*
2996 * net_device service functions
2997 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002998int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002999{
3000 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003001 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003002 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3003 napi);
3004 struct bnx2x *bp = fp->bp;
3005
3006 while (1) {
3007#ifdef BNX2X_STOP_ON_ERROR
3008 if (unlikely(bp->panic)) {
3009 napi_complete(napi);
3010 return 0;
3011 }
3012#endif
3013
Ariel Elior6383c0b2011-07-14 08:31:57 +00003014 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003015 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3016 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003017
3019 if (bnx2x_has_rx_work(fp)) {
3020 work_done += bnx2x_rx_int(fp, budget - work_done);
3021
3022 /* must not complete if we consumed full budget */
3023 if (work_done >= budget)
3024 break;
3025 }
3026
3027 /* Fall out from the NAPI loop if needed */
3028 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003029
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003030 /* No need to update SB for FCoE L2 ring as long as
3031 * it's connected to the default SB and the SB
3032 * has been updated when NAPI was scheduled.
3033 */
3034 if (IS_FCOE_FP(fp)) {
3035 napi_complete(napi);
3036 break;
3037 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003038 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003039 /* bnx2x_has_rx_work() reads the status block,
3040 * thus we need to ensure that status block indices
3041 * have been actually read (bnx2x_update_fpsb_idx)
3042 * prior to this check (bnx2x_has_rx_work) so that
3043 * we won't write the "newer" value of the status block
3044 * to IGU (if there was a DMA right after
3045 * bnx2x_has_rx_work and if there is no rmb, the memory
3046 * reading (bnx2x_update_fpsb_idx) may be postponed
3047 * to right before bnx2x_ack_sb). In this case there
3048 * will never be another interrupt until there is
3049 * another update of the status block, while there
3050 * is still unhandled work.
3051 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003052 rmb();
3053
3054 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3055 napi_complete(napi);
3056 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003057 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003058 "Update index to %d\n", fp->fp_hc_idx);
3059 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3060 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003061 IGU_INT_ENABLE, 1);
3062 break;
3063 }
3064 }
3065 }
3066
3067 return work_done;
3068}
3069
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003070/* we split the first BD into headers and data BDs
 3071	 * to ease the pain of our fellow microcode engineers;
3072 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003073 */
3074static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003075 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003076 struct sw_tx_bd *tx_buf,
3077 struct eth_tx_start_bd **tx_bd, u16 hlen,
3078 u16 bd_prod, int nbd)
3079{
3080 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3081 struct eth_tx_bd *d_tx_bd;
3082 dma_addr_t mapping;
3083 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3084
3085 /* first fix first BD */
3086 h_tx_bd->nbd = cpu_to_le16(nbd);
3087 h_tx_bd->nbytes = cpu_to_le16(hlen);
3088
Merav Sicron51c1a582012-03-18 10:33:38 +00003089 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3090 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003091
3092 /* now get a new data BD
3093 * (after the pbd) and fill it */
3094 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003095 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003096
3097 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3098 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3099
3100 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3101 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3102 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3103
3104 /* this marks the BD as one that has no individual mapping */
3105 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3106
3107 DP(NETIF_MSG_TX_QUEUED,
3108 "TSO split data size is %d (%x:%x)\n",
3109 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3110
3111 /* update tx_bd */
3112 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3113
3114 return bd_prod;
3115}
3116
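/* Fold a partial checksum when the checksum start differs from the
 * transport header: e.g. with fix = 2 the stack summed 2 extra bytes
 * before t_header, so their sum is subtracted out; with fix = -2 the
 * first 2 bytes after t_header were missed, so they are added back in.
 * The result is byte-swapped for the HW.
 */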
3117static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3118{
3119 if (fix > 0)
3120 csum = (u16) ~csum_fold(csum_sub(csum,
3121 csum_partial(t_header - fix, fix, 0)));
3122
3123 else if (fix < 0)
3124 csum = (u16) ~csum_fold(csum_add(csum,
3125 csum_partial(t_header, -fix, 0)));
3126
3127 return swab16(csum);
3128}
3129
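/* Derive TX offload flags for the skb: e.g. a CHECKSUM_PARTIAL TCPv4
 * GSO skb yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while an
 * skb with no HW checksum request maps to plain XMIT_PLAIN.
 */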
3130static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3131{
3132 u32 rc;
3133
3134 if (skb->ip_summed != CHECKSUM_PARTIAL)
3135 rc = XMIT_PLAIN;
3136
3137 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00003138 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003139 rc = XMIT_CSUM_V6;
3140 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3141 rc |= XMIT_CSUM_TCP;
3142
3143 } else {
3144 rc = XMIT_CSUM_V4;
3145 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3146 rc |= XMIT_CSUM_TCP;
3147 }
3148 }
3149
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00003150 if (skb_is_gso_v6(skb))
3151 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3152 else if (skb_is_gso(skb))
3153 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003154
3155 return rc;
3156}
3157
3158#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 3159/* check if a packet requires linearization (i.e. it is too fragmented);
 3160   no need to check fragmentation if page size > 8K (there will be no
 3161   violation of FW restrictions) */
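/* Illustration, assuming MAX_FETCH_BD is 13 as defined in bnx2x.h:
 * the FW fetches BDs in windows of wnd_size = 13 - 3 = 10, so for an
 * LSO skb every 10 consecutive BDs must together carry at least
 * gso_size bytes; any window that falls short forces skb_linearize().
 */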
3162static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3163 u32 xmit_type)
3164{
3165 int to_copy = 0;
3166 int hlen = 0;
3167 int first_bd_sz = 0;
3168
3169 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3170 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3171
3172 if (xmit_type & XMIT_GSO) {
3173 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3174 /* Check if LSO packet needs to be copied:
3175 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3176 int wnd_size = MAX_FETCH_BD - 3;
3177 /* Number of windows to check */
3178 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3179 int wnd_idx = 0;
3180 int frag_idx = 0;
3181 u32 wnd_sum = 0;
3182
3183 /* Headers length */
3184 hlen = (int)(skb_transport_header(skb) - skb->data) +
3185 tcp_hdrlen(skb);
3186
 3187			/* Amount of data (w/o headers) in the linear part of the SKB */
3188 first_bd_sz = skb_headlen(skb) - hlen;
3189
3190 wnd_sum = first_bd_sz;
3191
3192 /* Calculate the first sum - it's special */
3193 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3194 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003195 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003196
 3197			/* If there was data in the linear part of the skb - check it */
3198 if (first_bd_sz > 0) {
3199 if (unlikely(wnd_sum < lso_mss)) {
3200 to_copy = 1;
3201 goto exit_lbl;
3202 }
3203
3204 wnd_sum -= first_bd_sz;
3205 }
3206
3207 /* Others are easier: run through the frag list and
3208 check all windows */
3209 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3210 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003211 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003212
3213 if (unlikely(wnd_sum < lso_mss)) {
3214 to_copy = 1;
3215 break;
3216 }
3217 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003218 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003219 }
3220 } else {
 3221			/* a non-LSO packet that is too fragmented should
 3222			   always be linearized */
3223 to_copy = 1;
3224 }
3225 }
3226
3227exit_lbl:
3228 if (unlikely(to_copy))
3229 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003230 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003231 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3232 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3233
3234 return to_copy;
3235}
3236#endif
3237
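/* Pack the MSS into the E2 parse BD; an IPv6 GSO skb whose next header
 * is itself IPv6 (extension/tunnel case) is additionally flagged
 * IPV6_WITH_EXT_HDR so the FW parser accounts for the extra header.
 */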
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003238static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3239 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003240{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003241 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3242 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3243 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003244 if ((xmit_type & XMIT_GSO_V6) &&
3245 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003246 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003247}
3248
3249/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003250 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003251 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003252 * @skb: packet skb
3253 * @pbd: parse BD
3254 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003255 */
3256static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3257 struct eth_tx_parse_bd_e1x *pbd,
3258 u32 xmit_type)
3259{
3260 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3261 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
3262 pbd->tcp_flags = pbd_tcp_flags(skb);
3263
3264 if (xmit_type & XMIT_GSO_V4) {
3265 pbd->ip_id = swab16(ip_hdr(skb)->id);
3266 pbd->tcp_pseudo_csum =
3267 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3268 ip_hdr(skb)->daddr,
3269 0, IPPROTO_TCP, 0));
3270
3271 } else
3272 pbd->tcp_pseudo_csum =
3273 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3274 &ipv6_hdr(skb)->daddr,
3275 0, IPPROTO_TCP, 0));
3276
3277 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3278}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003279
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003280/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003281 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003282 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003283 * @bp: driver handle
3284 * @skb: packet skb
3285 * @parsing_data: data to be updated
3286 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003287 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003288 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003289 */
3290static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003291 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003292{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003293 *parsing_data |=
3294 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3295 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3296 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003297
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003298 if (xmit_type & XMIT_CSUM_TCP) {
3299 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3300 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3301 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003302
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003303 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3304 } else
3305 /* We support checksum offload for TCP and UDP only.
3306 * No need to pass the UDP header length - it's a constant.
3307 */
3308 return skb_transport_header(skb) +
3309 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003310}
3311
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003312static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3313 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3314{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003315 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3316
3317 if (xmit_type & XMIT_CSUM_V4)
3318 tx_start_bd->bd_flags.as_bitfield |=
3319 ETH_TX_BD_FLAGS_IP_CSUM;
3320 else
3321 tx_start_bd->bd_flags.as_bitfield |=
3322 ETH_TX_BD_FLAGS_IPV6;
3323
3324 if (!(xmit_type & XMIT_CSUM_TCP))
3325 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003326}
3327
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003328/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003329 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003330 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003331 * @bp: driver handle
3332 * @skb: packet skb
3333 * @pbd: parse BD to be updated
3334 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003335 */
3336static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3337 struct eth_tx_parse_bd_e1x *pbd,
3338 u32 xmit_type)
3339{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003340 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003341
3342 /* for now NS flag is not used in Linux */
3343 pbd->global_data =
3344 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3345 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3346
3347 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003348 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003349
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003350 hlen += pbd->ip_hlen_w;
3351
3352 /* We support checksum offload for TCP and UDP only */
3353 if (xmit_type & XMIT_CSUM_TCP)
3354 hlen += tcp_hdrlen(skb) / 2;
3355 else
3356 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003357
3358 pbd->total_hlen_w = cpu_to_le16(hlen);
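	/* hlen was accumulated in 16-bit words; convert it back to bytes */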
3359 hlen = hlen*2;
3360
3361 if (xmit_type & XMIT_CSUM_TCP) {
3362 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3363
3364 } else {
3365 s8 fix = SKB_CS_OFF(skb); /* signed! */
3366
3367 DP(NETIF_MSG_TX_QUEUED,
3368 "hlen %d fix %d csum before fix %x\n",
3369 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3370
3371 /* HW bug: fixup the CSUM */
3372 pbd->tcp_pseudo_csum =
3373 bnx2x_csum_fix(skb_transport_header(skb),
3374 SKB_CS(skb), fix);
3375
3376 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3377 pbd->tcp_pseudo_csum);
3378 }
3379
3380 return hlen;
3381}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003382
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003383/* called with netif_tx_lock
3384 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3385 * netif_wake_queue()
3386 */
3387netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3388{
3389 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003390
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003391 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003392 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003393 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003394 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003395 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003396 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003397 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003398 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003399 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003400 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003401 dma_addr_t mapping;
3402 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3403 int i;
3404 u8 hlen = 0;
3405 __le16 pkt_size = 0;
3406 struct ethhdr *eth;
3407 u8 mac_type = UNICAST_ADDRESS;
3408
3409#ifdef BNX2X_STOP_ON_ERROR
3410 if (unlikely(bp->panic))
3411 return NETDEV_TX_BUSY;
3412#endif
3413
Ariel Elior6383c0b2011-07-14 08:31:57 +00003414 txq_index = skb_get_queue_mapping(skb);
3415 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003416
Merav Sicron55c11942012-11-07 00:45:48 +00003417 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003418
Merav Sicron65565882012-06-19 07:48:26 +00003419 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003420
3421 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003422 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003423 txq_index, fp_index, txdata_index); */
3424
Ariel Elior6383c0b2011-07-14 08:31:57 +00003425	/* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003426 DP(NETIF_MSG_TX_QUEUED,
3427 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003428 txdata->cid, fp_index, txdata_index, txdata, fp); */
3429
3430 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003431 skb_shinfo(skb)->nr_frags +
3432 BDS_PER_TX_PKT +
3433 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003434 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003435 if (txdata->tx_ring_size == 0) {
3436 struct bnx2x_eth_q_stats *q_stats =
3437 bnx2x_fp_qstats(bp, txdata->parent_fp);
3438 q_stats->driver_filtered_tx_pkt++;
3439 dev_kfree_skb(skb);
3440 return NETDEV_TX_OK;
3441 }
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003442 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3443 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003444 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003445
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003446 return NETDEV_TX_BUSY;
3447 }
3448
Merav Sicron51c1a582012-03-18 10:33:38 +00003449 DP(NETIF_MSG_TX_QUEUED,
3450 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003451 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003452 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3453
3454 eth = (struct ethhdr *)skb->data;
3455
 3456	/* set flag according to packet type (UNICAST_ADDRESS is default) */
3457 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3458 if (is_broadcast_ether_addr(eth->h_dest))
3459 mac_type = BROADCAST_ADDRESS;
3460 else
3461 mac_type = MULTICAST_ADDRESS;
3462 }
3463
3464#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3465 /* First, check if we need to linearize the skb (due to FW
3466 restrictions). No need to check fragmentation if page size > 8K
3467 (there will be no violation to FW restrictions) */
3468 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3469 /* Statistics of linearization */
3470 bp->lin_cnt++;
3471 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003472 DP(NETIF_MSG_TX_QUEUED,
3473 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003474 dev_kfree_skb_any(skb);
3475 return NETDEV_TX_OK;
3476 }
3477 }
3478#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003479 /* Map skb linear data for DMA */
3480 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3481 skb_headlen(skb), DMA_TO_DEVICE);
3482 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003483 DP(NETIF_MSG_TX_QUEUED,
3484 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003485 dev_kfree_skb_any(skb);
3486 return NETDEV_TX_OK;
3487 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003488 /*
3489 Please read carefully. First we use one BD which we mark as start,
3490 then we have a parsing info BD (used for TSO or xsum),
3491 and only then we have the rest of the TSO BDs.
3492 (don't forget to mark the last one as last,
3493 and to unmap only AFTER you write to the BD ...)
 3494	   And above all, all PBD sizes are in words - NOT DWORDS!
3495 */
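	/* Roughly, the resulting BD chain is:
	 * start BD (headers) -> parse BD (e1x or e2) -> data BD(s) for frags,
	 * where nbd counts all BDs and first_bd->nbd is written only after
	 * every fragment has been mapped successfully.
	 */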
3496
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003497 /* get current pkt produced now - advance it just before sending packet
3498 * since mapping of pages may fail and cause packet to be dropped
3499 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003500 pkt_prod = txdata->tx_pkt_prod;
3501 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003502
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003503 /* get a tx_buf and first BD
3504 * tx_start_bd may be changed during SPLIT,
3505 * but first_bd will always stay first
3506 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003507 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3508 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003509 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003510
3511 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003512 SET_FLAG(tx_start_bd->general_data,
3513 ETH_TX_START_BD_PARSE_NBDS,
3514 0);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003515
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003516 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003517 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003518
3519 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003520 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003521 tx_buf->skb = skb;
3522 tx_buf->flags = 0;
3523
3524 DP(NETIF_MSG_TX_QUEUED,
3525 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003526 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003527
Jesse Grosseab6d182010-10-20 13:56:03 +00003528 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003529 tx_start_bd->vlan_or_ethertype =
3530 cpu_to_le16(vlan_tx_tag_get(skb));
3531 tx_start_bd->bd_flags.as_bitfield |=
3532 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003533 } else {
3534 /* when transmitting in a vf, start bd must hold the ethertype
3535 * for fw to enforce it
3536 */
Yuval Mintz823e1d92013-01-14 05:11:47 +00003537#ifndef BNX2X_STOP_ON_ERROR
Ariel Eliordc1ba592013-01-01 05:22:30 +00003538 if (IS_VF(bp)) {
Yuval Mintz823e1d92013-01-14 05:11:47 +00003539#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003540 tx_start_bd->vlan_or_ethertype =
3541 cpu_to_le16(ntohs(eth->h_proto));
Yuval Mintz823e1d92013-01-14 05:11:47 +00003542#ifndef BNX2X_STOP_ON_ERROR
Ariel Eliordc1ba592013-01-01 05:22:30 +00003543 } else {
3544 /* used by FW for packet accounting */
3545 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3546 }
Yuval Mintz823e1d92013-01-14 05:11:47 +00003547#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003548 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003549
3550 /* turn on parsing and get a BD */
3551 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003552
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003553 if (xmit_type & XMIT_CSUM)
3554 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003555
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003556 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003557 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003558 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3559 /* Set PBD in checksum offload case */
3560 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003561 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3562 &pbd_e2_parsing_data,
3563 xmit_type);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003564
3565 if (IS_MF_SI(bp) || IS_VF(bp)) {
3566 /* fill in the MAC addresses in the PBD - for local
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003567 * switching
3568 */
3569 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3570 &pbd_e2->src_mac_addr_mid,
3571 &pbd_e2->src_mac_addr_lo,
3572 eth->h_source);
3573 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3574 &pbd_e2->dst_mac_addr_mid,
3575 &pbd_e2->dst_mac_addr_lo,
3576 eth->h_dest);
3577 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003578
3579 SET_FLAG(pbd_e2_parsing_data,
3580 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003581 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003582 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003583 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003584 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3585 /* Set PBD in checksum offload case */
3586 if (xmit_type & XMIT_CSUM)
3587 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003588
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003589 SET_FLAG(global_data,
3590 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3591 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003592 }
3593
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003594 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003595 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3596 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003597 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003598 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3599 pkt_size = tx_start_bd->nbytes;
3600
Merav Sicron51c1a582012-03-18 10:33:38 +00003601 DP(NETIF_MSG_TX_QUEUED,
3602 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003603 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3604 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003605 tx_start_bd->bd_flags.as_bitfield,
3606 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003607
3608 if (xmit_type & XMIT_GSO) {
3609
3610 DP(NETIF_MSG_TX_QUEUED,
3611 "TSO packet len %d hlen %d total len %d tso size %d\n",
3612 skb->len, hlen, skb_headlen(skb),
3613 skb_shinfo(skb)->gso_size);
3614
3615 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3616
3617 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003618 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3619 &tx_start_bd, hlen,
3620 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003621 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003622 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3623 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003624 else
3625 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003626 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003627
3628 /* Set the PBD's parsing_data field if not zero
3629 * (for the chips newer than 57711).
3630 */
3631 if (pbd_e2_parsing_data)
3632 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3633
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003634 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3635
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003636 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003637 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3638 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3639
Eric Dumazet9e903e02011-10-18 21:00:24 +00003640 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3641 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003642 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003643 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003644
Merav Sicron51c1a582012-03-18 10:33:38 +00003645 DP(NETIF_MSG_TX_QUEUED,
3646 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003647
 3648			/* we need to unmap all buffers already mapped
 3649			 * for this SKB;
 3650			 * first_bd->nbd needs to be properly updated
 3651			 * before the call to bnx2x_free_tx_pkt
3652 */
3653 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003654 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003655 TX_BD(txdata->tx_pkt_prod),
3656 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003657 return NETDEV_TX_OK;
3658 }
3659
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003660 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003661 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003662 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003663 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003664
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003665 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3666 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003667 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3668 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003669 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003670
3671 DP(NETIF_MSG_TX_QUEUED,
3672 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3673 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3674 le16_to_cpu(tx_data_bd->nbytes));
3675 }
3676
3677 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3678
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003679 /* update with actual num BDs */
3680 first_bd->nbd = cpu_to_le16(nbd);
3681
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003682 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3683
3684 /* now send a tx doorbell, counting the next BD
3685 * if the packet contains or ends with it
3686 */
3687 if (TX_BD_POFF(bd_prod) < nbd)
3688 nbd++;
3689
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003690 /* total_pkt_bytes should be set on the first data BD if
3691 * it's not an LSO packet and there is more than one
3692 * data BD. In this case pkt_size is limited by an MTU value.
3693 * However we prefer to set it for an LSO packet (while we don't
 3694	 * have to) in order to save some CPU cycles in the non-LSO
 3695	 * case, where we care much more about them.
3696 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003697 if (total_pkt_bd != NULL)
3698 total_pkt_bd->total_pkt_bytes = pkt_size;
3699
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003700 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003701 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003702 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003703 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3704 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3705 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3706 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003707 if (pbd_e2)
3708 DP(NETIF_MSG_TX_QUEUED,
3709 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3710 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3711 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3712 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3713 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003714 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3715
Tom Herbert2df1a702011-11-28 16:33:37 +00003716 netdev_tx_sent_queue(txq, skb->len);
3717
Willem de Bruijn8373c572012-04-27 09:04:06 +00003718 skb_tx_timestamp(skb);
3719
Ariel Elior6383c0b2011-07-14 08:31:57 +00003720 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003721 /*
3722 * Make sure that the BD data is updated before updating the producer
3723 * since FW might read the BD right after the producer is updated.
3724 * This is only applicable for weak-ordered memory model archs such
3725 * as IA-64. The following barrier is also mandatory since FW will
 3726	 * assume packets must have BDs.
3727 */
3728 wmb();
3729
Ariel Elior6383c0b2011-07-14 08:31:57 +00003730 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003731 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003732
Ariel Elior6383c0b2011-07-14 08:31:57 +00003733 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003734
3735 mmiowb();
3736
Ariel Elior6383c0b2011-07-14 08:31:57 +00003737 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003738
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003739 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003740 netif_tx_stop_queue(txq);
3741
3742 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3743 * ordering of set_bit() in netif_tx_stop_queue() and read of
3744 * fp->bd_tx_cons */
3745 smp_mb();
3746
Barak Witkowski15192a82012-06-19 07:48:28 +00003747 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003748 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003749 netif_tx_wake_queue(txq);
3750 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003751 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003752
3753 return NETDEV_TX_OK;
3754}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003755
Ariel Elior6383c0b2011-07-14 08:31:57 +00003756/**
3757 * bnx2x_setup_tc - routine to configure net_device for multi tc
3758 *
 3759	 * @dev:	net device to configure
 3760	 * @num_tc:	number of traffic classes to enable
3761 *
3762 * callback connected to the ndo_setup_tc function pointer
3763 */
3764int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3765{
3766 int cos, prio, count, offset;
3767 struct bnx2x *bp = netdev_priv(dev);
3768
3769 /* setup tc must be called under rtnl lock */
3770 ASSERT_RTNL();
3771
3772 /* no traffic classes requested. aborting */
3773 if (!num_tc) {
3774 netdev_reset_tc(dev);
3775 return 0;
3776 }
3777
3778 /* requested to support too many traffic classes */
3779 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003780 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3781 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003782 return -EINVAL;
3783 }
3784
 3785	/* declare the number of supported traffic classes */
3786 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003787 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003788 return -EINVAL;
3789 }
3790
3791 /* configure priority to traffic class mapping */
3792 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3793 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003794 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3795 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003796 prio, bp->prio_to_cos[prio]);
3797 }
 3798
 3800	/* Use this configuration to differentiate tc0 from other COSes.
 3801	   This can be used for ETS or PFC, and saves the effort of setting
 3802	   up a multi-class queue disc or negotiating DCBX with a switch
3803 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003804 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003805 for (prio = 1; prio < 16; prio++) {
3806 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003807 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003808 } */
3809
3810 /* configure traffic class to transmission queue mapping */
3811 for (cos = 0; cos < bp->max_cos; cos++) {
3812 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003813 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003814 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003815 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3816 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003817 cos, offset, count);
3818 }
3819
3820 return 0;
3821}
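/* Example mapping (illustrative values): with num_tc = 2 and four
 * non-CNIC ETH queues, tc0 is served by queues 0-3 and tc1 by queues
 * 4-7, since each COS starts at cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
 */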
3822
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003823/* called with rtnl_lock */
3824int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3825{
3826 struct sockaddr *addr = p;
3827 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003828 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003829
Merav Sicron51c1a582012-03-18 10:33:38 +00003830 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3831 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003832 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003833 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003834
Barak Witkowskia3348722012-04-23 03:04:46 +00003835 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3836 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003837 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003838 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003839 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003840
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003841 if (netif_running(dev)) {
3842 rc = bnx2x_set_eth_mac(bp, false);
3843 if (rc)
3844 return rc;
3845 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003846
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003847 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3848
3849 if (netif_running(dev))
3850 rc = bnx2x_set_eth_mac(bp, true);
3851
3852 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003853}
3854
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003855static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3856{
3857 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3858 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003859 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003860
3861 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00003862
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003863 if (IS_FCOE_IDX(fp_index)) {
3864 memset(sb, 0, sizeof(union host_hc_status_block));
3865 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003866 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003867 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003868 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003869 BNX2X_PCI_FREE(sb->e2_sb,
3870 bnx2x_fp(bp, fp_index,
3871 status_blk_mapping),
3872 sizeof(struct host_hc_status_block_e2));
3873 else
3874 BNX2X_PCI_FREE(sb->e1x_sb,
3875 bnx2x_fp(bp, fp_index,
3876 status_blk_mapping),
3877 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003878 }
Merav Sicron55c11942012-11-07 00:45:48 +00003879
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003880 /* Rx */
3881 if (!skip_rx_queue(bp, fp_index)) {
3882 bnx2x_free_rx_bds(fp);
3883
3884 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3885 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3886 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3887 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3888 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3889
3890 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3891 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3892 sizeof(struct eth_fast_path_rx_cqe) *
3893 NUM_RCQ_BD);
3894
3895 /* SGE ring */
3896 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3897 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3898 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3899 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3900 }
3901
3902 /* Tx */
3903 if (!skip_tx_queue(bp, fp_index)) {
3904 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003905 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003906 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003907
Merav Sicron51c1a582012-03-18 10:33:38 +00003908 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003909 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003910 fp_index, cos, txdata->cid);
3911
3912 BNX2X_FREE(txdata->tx_buf_ring);
3913 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3914 txdata->tx_desc_mapping,
3915 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3916 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003917 }
3918 /* end of fastpath */
3919}
3920
void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;
	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;
	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

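/* Cache pointers to the index values and running index of the status
 * block, picking the E2 or E1x layout to match the chip.
 */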
static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

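/* Chain the RCQ pages: the last element of each page is a next-page
 * descriptor pointing at the following page (wrapping back to the first
 * one), so the completion queue appears contiguous to the hardware.
 */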
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

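/**
 * bnx2x_alloc_fp_mem_at - allocate the memory of a single fastpath queue.
 *
 * @bp:		driver handle
 * @index:	fastpath index to allocate
 *
 * Sizes the Rx ring (honouring a user-set bp->rx_ring_size, storage-only
 * MF modes and the FW-required minimum), then allocates the status block,
 * the per-CoS Tx rings and the Rx bd/completion/SGE rings. On a partial
 * Rx allocation the queue is kept only if it still meets the FW minimum.
 */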
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				    sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
					&txdata->tx_desc_mapping,
					sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough;
	 * in these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}

int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

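/* Allocate fastpath memory for all ETH queues. On a partial failure the
 * ETH queue count is shrunk and the CNIC queues are moved down so they
 * remain adjacent to the last ETH queue.
 */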
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP;
			 * must be done in that order:
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

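/* Release the per-adapter bookkeeping allocated by bnx2x_alloc_mem_bp() */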
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp->tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is the maximum number of
	 * fastpath IGU SBs plus the default SB (for PF).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);

	fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

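/* Reload the driver if the device is running; used after configuration
 * changes that only take effect during load.
 */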
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

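/* Return the index of the PHY currently driving the link: the internal
 * PHY, or EXT_PHY1/EXT_PHY2 according to the link state and the
 * configured PHY selection.
 */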
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

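/* Apply changed netdev features. Toggling the TPA/GRO flags or the BMAC
 * loopback mode requires a reload if the interface is running.
 */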
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

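/* Tx watchdog handler: defer the actual reset to the sp_rtnl task so the
 * netif can be brought down gracefully first.
 */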
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

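/* PCI power-management hooks: unload the NIC on suspend, reload it on
 * resume.
 */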
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
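/* Write the CDU context-validation values into the ustorm and xstorm
 * sections of an ETH connection context.
 */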
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

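/* Program the HC timeout, in ticks, for one status block index */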
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

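/* Set or clear the HC_ENABLED flag of one status block index */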
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

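/**
 * bnx2x_update_coalesce_sb_index - set interrupt coalescing for one SB index.
 *
 * @bp:		driver handle
 * @fw_sb_id:	FW status block id
 * @sb_index:	index within the status block
 * @disable:	explicitly disable HC on this index
 * @usec:	coalescing interval in microseconds
 *
 * The interval is converted to ticks of BNX2X_BTR microseconds each; an
 * interval of zero disables coalescing on the index as well.
 */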
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}