/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return bnx2x_num_queues ?
		 min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
		 min_t(int, netif_get_num_default_rss_queues(),
		       BNX2X_MAX_QUEUES(bp));
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the content of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
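
/* Note on the BD chain walked by bnx2x_free_tx_pkt() above: a packet
 * occupies a start BD, a parse BD, an optional TSO split BD (which shares
 * the start BD's DMA mapping, hence split_bd_len), and one regular BD per
 * fragment; the nbd count taken from the start BD covers all of them.
 */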

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
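
/* Note on the SGE mask bookkeeping above: each sge_mask element tracks
 * BIT_VEC64_ELEM_SZ SGEs. The producer loop only advances rx_sge_prod over
 * elements whose bits have all been cleared (re-arming them to all ones);
 * the first element that still has a set bit stops the walk, so partially
 * consumed elements are never handed back to the hardware.
 */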

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info  */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
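/* Illustration: the 12 bytes follow the standard TCP timestamp option
 * layout, 1 + 1 (two NOP pads) + 1 (kind) + 1 (length) + 4 (TSval) +
 * 4 (TSecr).
 */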
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
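
/* Worked example (illustrative numbers): for an IPv4 aggregation whose
 * first frame has len_on_bd = 1514 and carries a timestamp option,
 * hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) + 12 (timestamp)
 * = 66, so gso_size = 1514 - 66 = 1448.
 */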

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif
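
/* The helpers above leave the skb in the state the stack's own TCP GRO
 * completion would: network/transport header offsets set and th->check
 * seeded with the complemented pseudo-header checksum, before
 * tcp_gro_complete() marks the coalesced skb for checksum completion.
 */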

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
						rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
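
/* Illustration (hypothetical configuration): with maxCfg = 40, SI mode
 * scales a 10000 Mbps link down to 10000 * 40 / 100 = 4000 Mbps, while SD
 * mode uses 40 * 100 = 4000 Mbps as a hard cap and only lowers line_speed
 * when it exceeds that value.
 */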

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
1211
1212/**
1213 * __bnx2x_link_report - report link status to OS.
1214 *
1215 * @bp: driver handle
1216 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001217 * None atomic implementation.
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001218 * Should be called under the phy_lock.
1219 */
1220void __bnx2x_link_report(struct bnx2x *bp)
1221{
1222 struct bnx2x_link_report_data cur_data;
1223
1224 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001225 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001226 bnx2x_read_mf_cfg(bp);
1227
1228 /* Read the current link report info */
1229 bnx2x_fill_report_data(bp, &cur_data);
1230
1231 /* Don't report link down or exactly the same link status twice */
1232 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1233 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1234 &bp->last_reported_link.link_report_flags) &&
1235 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1236 &cur_data.link_report_flags)))
1237 return;
1238
1239 bp->link_cnt++;
1240
1241 /* We are going to report new link parameters now -
1242 * remember the current data for the next time.
1243 */
1244 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1245
1246 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1247 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001248 netif_carrier_off(bp->dev);
1249 netdev_err(bp->dev, "NIC Link is Down\n");
1250 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001251 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001252 const char *duplex;
1253 const char *flow;
1254
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001255 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001256
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001257 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1258 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001259 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001260 else
Joe Perches94f05b02011-08-14 12:16:20 +00001261 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001262
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001263 /* Handle the FC at the end so that only these flags can
1264 * possibly be set. This way we can easily check whether FC is
1265 * enabled.
1266 */
1267 if (cur_data.link_report_flags) {
1268 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1269 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001270 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1271 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001272 flow = "ON - receive & transmit";
1273 else
1274 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001275 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001276 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001277 }
Joe Perches94f05b02011-08-14 12:16:20 +00001278 } else {
1279 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001280 }
Joe Perches94f05b02011-08-14 12:16:20 +00001281 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1282 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001283 }
1284}
1285
Eric Dumazet1191cb82012-04-27 21:39:21 +00001286static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1287{
1288 int i;
1289
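/* The reserved entries at the end of each SGE page hold a "next page"
 * pointer; program it with the DMA address of the following page
 * (wrapping back to the first) so the SGE ring can be followed across
 * page boundaries.
 */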
1290 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1291 struct eth_rx_sge *sge;
1292
1293 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1294 sge->addr_hi =
1295 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1296 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1297
1298 sge->addr_lo =
1299 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1300 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1301 }
1302}
1303
1304static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1305 struct bnx2x_fastpath *fp, int last)
1306{
1307 int i;
1308
1309 for (i = 0; i < last; i++) {
1310 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1311 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1312 u8 *data = first_buf->data;
1313
1314 if (data == NULL) {
1315 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1316 continue;
1317 }
1318 if (tpa_info->tpa_state == BNX2X_TPA_START)
1319 dma_unmap_single(&bp->pdev->dev,
1320 dma_unmap_addr(first_buf, mapping),
1321 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001322 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001323 first_buf->data = NULL;
1324 }
1325}
1326
Merav Sicron55c11942012-11-07 00:45:48 +00001327void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1328{
1329 int j;
1330
1331 for_each_rx_queue_cnic(bp, j) {
1332 struct bnx2x_fastpath *fp = &bp->fp[j];
1333
1334 fp->rx_bd_cons = 0;
1335
1336 /* Activate BD ring */
1337 /* Warning!
1338 * this will generate an interrupt (to the TSTORM);
1339 * it must only be done after the chip is initialized
1340 */
1341 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1342 fp->rx_sge_prod);
1343 }
1344}
1345
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001346void bnx2x_init_rx_rings(struct bnx2x *bp)
1347{
1348 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001349 u16 ring_prod;
1350 int i, j;
1351
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001352 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001353 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001354 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001355
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001356 DP(NETIF_MSG_IFUP,
1357 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1358
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001359 if (!fp->disable_tpa) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001360 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001361 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001362 struct bnx2x_agg_info *tpa_info =
1363 &fp->tpa_info[i];
1364 struct sw_rx_bd *first_buf =
1365 &tpa_info->first_buf;
1366
Michal Schmidt996dedb2013-09-05 22:13:09 +02001367 first_buf->data =
1368 bnx2x_frag_alloc(fp, GFP_KERNEL);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001369 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001370 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1371 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001372 bnx2x_free_tpa_pool(bp, fp, i);
1373 fp->disable_tpa = 1;
1374 break;
1375 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001376 dma_unmap_addr_set(first_buf, mapping, 0);
1377 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001378 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001379
1380 /* "next page" elements initialization */
1381 bnx2x_set_next_page_sgl(fp);
1382
1383 /* set SGEs bit mask */
1384 bnx2x_init_sge_ring_bit_mask(fp);
1385
1386 /* Allocate SGEs and initialize the ring elements */
1387 for (i = 0, ring_prod = 0;
1388 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1389
Michal Schmidt996dedb2013-09-05 22:13:09 +02001390 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1391 GFP_KERNEL) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001392 BNX2X_ERR("was only able to allocate %d rx sges\n",
1393 i);
1394 BNX2X_ERR("disabling TPA for queue[%d]\n",
1395 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001396 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001397 bnx2x_free_rx_sge_range(bp, fp,
1398 ring_prod);
1399 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001400 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001401 fp->disable_tpa = 1;
1402 ring_prod = 0;
1403 break;
1404 }
1405 ring_prod = NEXT_SGE_IDX(ring_prod);
1406 }
1407
1408 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001409 }
1410 }
1411
Merav Sicron55c11942012-11-07 00:45:48 +00001412 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001413 struct bnx2x_fastpath *fp = &bp->fp[j];
1414
1415 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001416
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001417 /* Activate BD ring */
1418 /* Warning!
1419 * this will generate an interrupt (to the TSTORM);
1420 * it must only be done after the chip is initialized
1421 */
1422 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1423 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001424
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001425 if (j != 0)
1426 continue;
1427
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001428 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001429 REG_WR(bp, BAR_USTRORM_INTMEM +
1430 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1431 U64_LO(fp->rx_comp_mapping));
1432 REG_WR(bp, BAR_USTRORM_INTMEM +
1433 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1434 U64_HI(fp->rx_comp_mapping));
1435 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001436 }
1437}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001438
Merav Sicron55c11942012-11-07 00:45:48 +00001439static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1440{
1441 u8 cos;
1442 struct bnx2x *bp = fp->bp;
1443
1444 for_each_cos_in_tx_queue(fp, cos) {
1445 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1446 unsigned pkts_compl = 0, bytes_compl = 0;
1447
1448 u16 sw_prod = txdata->tx_pkt_prod;
1449 u16 sw_cons = txdata->tx_pkt_cons;
1450
1451 while (sw_cons != sw_prod) {
1452 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1453 &pkts_compl, &bytes_compl);
1454 sw_cons++;
1455 }
1456
1457 netdev_tx_reset_queue(
1458 netdev_get_tx_queue(bp->dev,
1459 txdata->txq_index));
1460 }
1461}
1462
1463static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1464{
1465 int i;
1466
1467 for_each_tx_queue_cnic(bp, i) {
1468 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1469 }
1470}
1471
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001472static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1473{
1474 int i;
1475
Merav Sicron55c11942012-11-07 00:45:48 +00001476 for_each_eth_queue(bp, i) {
1477 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001478 }
1479}
1480
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001481static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1482{
1483 struct bnx2x *bp = fp->bp;
1484 int i;
1485
1486 /* ring wasn't allocated */
1487 if (fp->rx_buf_ring == NULL)
1488 return;
1489
1490 for (i = 0; i < NUM_RX_BD; i++) {
1491 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001492 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001493
Eric Dumazete52fcb22011-11-14 06:05:34 +00001494 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001495 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001496 dma_unmap_single(&bp->pdev->dev,
1497 dma_unmap_addr(rx_buf, mapping),
1498 fp->rx_buf_size, DMA_FROM_DEVICE);
1499
Eric Dumazete52fcb22011-11-14 06:05:34 +00001500 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001501 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001502 }
1503}
1504
Merav Sicron55c11942012-11-07 00:45:48 +00001505static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1506{
1507 int j;
1508
1509 for_each_rx_queue_cnic(bp, j) {
1510 bnx2x_free_rx_bds(&bp->fp[j]);
1511 }
1512}
1513
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001514static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1515{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001516 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001517
Merav Sicron55c11942012-11-07 00:45:48 +00001518 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001519 struct bnx2x_fastpath *fp = &bp->fp[j];
1520
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001521 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001522
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001523 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001524 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001525 }
1526}
1527
stephen hemmingera8f47eb2014-01-09 22:20:11 -08001528static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00001529{
1530 bnx2x_free_tx_skbs_cnic(bp);
1531 bnx2x_free_rx_skbs_cnic(bp);
1532}
1533
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001534void bnx2x_free_skbs(struct bnx2x *bp)
1535{
1536 bnx2x_free_tx_skbs(bp);
1537 bnx2x_free_rx_skbs(bp);
1538}
1539
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001540void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1541{
1542 /* load old values */
1543 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1544
1545 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1546 /* leave all but MAX value */
1547 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1548
1549 /* set new MAX value */
1550 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1551 & FUNC_MF_CFG_MAX_BW_MASK;
1552
1553 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1554 }
1555}
1556
Dmitry Kravkovca924292011-06-14 01:33:08 +00001557/**
1558 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1559 *
1560 * @bp: driver handle
1561 * @nvecs: number of vectors to be released
1562 */
1563static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001564{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001565 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001566
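/* MSI-X vector layout: entry 0 is the slowpath/default status block
 * (PF only), followed by one entry for CNIC when it is supported,
 * and then one entry per ETH fastpath queue.
 */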
Dmitry Kravkovca924292011-06-14 01:33:08 +00001567 if (nvecs == offset)
1568 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001569
1570 /* VFs don't have a default SB */
1571 if (IS_PF(bp)) {
1572 free_irq(bp->msix_table[offset].vector, bp->dev);
1573 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1574 bp->msix_table[offset].vector);
1575 offset++;
1576 }
Merav Sicron55c11942012-11-07 00:45:48 +00001577
1578 if (CNIC_SUPPORT(bp)) {
1579 if (nvecs == offset)
1580 return;
1581 offset++;
1582 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001583
Dmitry Kravkovca924292011-06-14 01:33:08 +00001584 for_each_eth_queue(bp, i) {
1585 if (nvecs == offset)
1586 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001587 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1588 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001589
1590 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001591 }
1592}
1593
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001594void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001595{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001596 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001597 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1598 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1599
1600 /* vfs don't have a default status block */
1601 if (IS_PF(bp))
1602 nvecs++;
1603
1604 bnx2x_free_msix_irqs(bp, nvecs);
1605 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001606 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001607 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001608}
1609
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001610int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001611{
Ariel Elior1ab44342013-01-01 05:22:23 +00001612 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001613
Ariel Elior1ab44342013-01-01 05:22:23 +00001614 /* VFs don't have a default status block */
1615 if (IS_PF(bp)) {
1616 bp->msix_table[msix_vec].entry = msix_vec;
1617 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1618 bp->msix_table[0].entry);
1619 msix_vec++;
1620 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001621
Merav Sicron55c11942012-11-07 00:45:48 +00001622 /* CNIC requires an MSI-X vector for itself */
1623 if (CNIC_SUPPORT(bp)) {
1624 bp->msix_table[msix_vec].entry = msix_vec;
1625 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1626 msix_vec, bp->msix_table[msix_vec].entry);
1627 msix_vec++;
1628 }
1629
Ariel Elior6383c0b2011-07-14 08:31:57 +00001630 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001631 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001632 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001633 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1634 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001635 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001636 }
1637
Ariel Elior1ab44342013-01-01 05:22:23 +00001638 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1639 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001640
Ariel Elior1ab44342013-01-01 05:22:23 +00001641 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001642
1643 /*
1644 * reconfigure number of tx/rx queues according to available
1645 * MSI-X vectors
1646 */
Merav Sicron55c11942012-11-07 00:45:48 +00001647 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001648 /* how many fewer vectors will we have? */
Ariel Elior1ab44342013-01-01 05:22:23 +00001649 int diff = msix_vec - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001650
Merav Sicron51c1a582012-03-18 10:33:38 +00001651 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001652
1653 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1654
1655 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001656 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1657 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001658 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001659 /*
1660 * decrease number of queues by number of unallocated entries
1661 */
Merav Sicron55c11942012-11-07 00:45:48 +00001662 bp->num_ethernet_queues -= diff;
1663 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001664
Merav Sicron51c1a582012-03-18 10:33:38 +00001665 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001666 bp->num_queues);
1667 } else if (rc > 0) {
1668 /* Get by with single vector */
1669 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1670 if (rc) {
1671 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1672 rc);
1673 goto no_msix;
1674 }
1675
1676 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1677 bp->flags |= USING_SINGLE_MSIX_FLAG;
1678
Merav Sicron55c11942012-11-07 00:45:48 +00001679 BNX2X_DEV_INFO("set number of queues to 1\n");
1680 bp->num_ethernet_queues = 1;
1681 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001682 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001683 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001684 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001685 }
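/* At this point we hold either the full vector set, a reduced set
 * (with the queue count trimmed to match), or a single MSI-X vector;
 * a hard failure jumps to no_msix so the caller can fall back to
 * MSI or INTx.
 */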
1686
1687 bp->flags |= USING_MSIX_FLAG;
1688
1689 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001690
1691no_msix:
1692 /* fall to INTx if not enough memory */
1693 if (rc == -ENOMEM)
1694 bp->flags |= DISABLE_MSI_FLAG;
1695
1696 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001697}
1698
1699static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1700{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001701 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001702
Ariel Eliorad5afc82013-01-01 05:22:26 +00001703 /* no default status block for vf */
1704 if (IS_PF(bp)) {
1705 rc = request_irq(bp->msix_table[offset++].vector,
1706 bnx2x_msix_sp_int, 0,
1707 bp->dev->name, bp->dev);
1708 if (rc) {
1709 BNX2X_ERR("request sp irq failed\n");
1710 return -EBUSY;
1711 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001712 }
1713
Merav Sicron55c11942012-11-07 00:45:48 +00001714 if (CNIC_SUPPORT(bp))
1715 offset++;
1716
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001717 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001718 struct bnx2x_fastpath *fp = &bp->fp[i];
1719 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1720 bp->dev->name, i);
1721
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001722 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001723 bnx2x_msix_fp_int, 0, fp->name, fp);
1724 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001725 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1726 bp->msix_table[offset].vector, rc);
1727 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001728 return -EBUSY;
1729 }
1730
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001731 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001732 }
1733
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001734 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001735 if (IS_PF(bp)) {
1736 offset = 1 + CNIC_SUPPORT(bp);
1737 netdev_info(bp->dev,
1738 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1739 bp->msix_table[0].vector,
1740 0, bp->msix_table[offset].vector,
1741 i - 1, bp->msix_table[offset + i - 1].vector);
1742 } else {
1743 offset = CNIC_SUPPORT(bp);
1744 netdev_info(bp->dev,
1745 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1746 0, bp->msix_table[offset].vector,
1747 i - 1, bp->msix_table[offset + i - 1].vector);
1748 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001749 return 0;
1750}
1751
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001752int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001753{
1754 int rc;
1755
1756 rc = pci_enable_msi(bp->pdev);
1757 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001758 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001759 return -1;
1760 }
1761 bp->flags |= USING_MSI_FLAG;
1762
1763 return 0;
1764}
1765
1766static int bnx2x_req_irq(struct bnx2x *bp)
1767{
1768 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001769 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001770
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001771 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001772 flags = 0;
1773 else
1774 flags = IRQF_SHARED;
1775
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001776 if (bp->flags & USING_MSIX_FLAG)
1777 irq = bp->msix_table[0].vector;
1778 else
1779 irq = bp->pdev->irq;
1780
1781 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001782}
1783
Yuval Mintzc957d092013-06-25 08:50:11 +03001784static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001785{
1786 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001787 if (bp->flags & USING_MSIX_FLAG &&
1788 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001789 rc = bnx2x_req_msix_irqs(bp);
1790 if (rc)
1791 return rc;
1792 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001793 rc = bnx2x_req_irq(bp);
1794 if (rc) {
1795 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1796 return rc;
1797 }
1798 if (bp->flags & USING_MSI_FLAG) {
1799 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001800 netdev_info(bp->dev, "using MSI IRQ %d\n",
1801 bp->dev->irq);
1802 }
1803 if (bp->flags & USING_MSIX_FLAG) {
1804 bp->dev->irq = bp->msix_table[0].vector;
1805 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1806 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001807 }
1808 }
1809
1810 return 0;
1811}
1812
Merav Sicron55c11942012-11-07 00:45:48 +00001813static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1814{
1815 int i;
1816
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001817 for_each_rx_queue_cnic(bp, i) {
1818 bnx2x_fp_init_lock(&bp->fp[i]);
Merav Sicron55c11942012-11-07 00:45:48 +00001819 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001820 }
Merav Sicron55c11942012-11-07 00:45:48 +00001821}
1822
Eric Dumazet1191cb82012-04-27 21:39:21 +00001823static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001824{
1825 int i;
1826
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001827 for_each_eth_queue(bp, i) {
1828 bnx2x_fp_init_lock(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001829 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001830 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001831}
1832
Merav Sicron55c11942012-11-07 00:45:48 +00001833static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1834{
1835 int i;
1836
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001837 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001838 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001839 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1840 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001841 }
Merav Sicron55c11942012-11-07 00:45:48 +00001842}
1843
Eric Dumazet1191cb82012-04-27 21:39:21 +00001844static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001845{
1846 int i;
1847
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001848 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001849 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001850 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1851 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001852 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001853}
1854
1855void bnx2x_netif_start(struct bnx2x *bp)
1856{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001857 if (netif_running(bp->dev)) {
1858 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001859 if (CNIC_LOADED(bp))
1860 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001861 bnx2x_int_enable(bp);
1862 if (bp->state == BNX2X_STATE_OPEN)
1863 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001864 }
1865}
1866
1867void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1868{
1869 bnx2x_int_disable_sync(bp, disable_hw);
1870 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001871 if (CNIC_LOADED(bp))
1872 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001873}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001874
Jason Wangf663dd92014-01-10 16:18:26 +08001875u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001876 void *accel_priv, select_queue_fallback_t fallback)
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001877{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001878 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001879
Merav Sicron55c11942012-11-07 00:45:48 +00001880 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001881 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1882 u16 ether_type = ntohs(hdr->h_proto);
1883
1884 /* Skip VLAN tag if present */
1885 if (ether_type == ETH_P_8021Q) {
1886 struct vlan_ethhdr *vhdr =
1887 (struct vlan_ethhdr *)skb->data;
1888
1889 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1890 }
1891
1892 /* If ethertype is FCoE or FIP - use FCoE ring */
1893 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001894 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001895 }
Merav Sicron55c11942012-11-07 00:45:48 +00001896
David S. Miller823dcd22011-08-20 10:39:12 -07001897 /* select a non-FCoE queue */
Daniel Borkmann99932d42014-02-16 15:55:20 +01001898 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001899}
1900
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001901void bnx2x_set_num_queues(struct bnx2x *bp)
1902{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001903 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001904 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001905
Barak Witkowskia3348722012-04-23 03:04:46 +00001906 /* override in STORAGE SD modes */
1907 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001908 bp->num_ethernet_queues = 1;
1909
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001910 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001911 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1912 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001913
1914 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001915}
1916
David S. Miller823dcd22011-08-20 10:39:12 -07001917/**
1918 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1919 *
1920 * @bp: Driver handle
1921 *
1922 * We currently support at most 16 Tx queues for each CoS, thus we will
1923 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1924 * bp->max_cos.
1925 *
1926 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1927 * index after all ETH L2 indices.
1928 *
1929 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1930 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001931 * 16..31,...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001932 *
1933 * The proper configuration of skb->queue_mapping is handled by
1934 * bnx2x_select_queue() and __skb_tx_hash().
1935 *
1936 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1937 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1938 */
Merav Sicron55c11942012-11-07 00:45:48 +00001939static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001940{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001941 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001942
Merav Sicron65565882012-06-19 07:48:26 +00001943 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001944 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001945
1946/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001947 if (include_cnic && !NO_FCOE(bp)) {
1948 rx++;
1949 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001950 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001951
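/* Illustrative example: 8 ETH queues with max_cos of 3 plus an FCoE
 * L2 queue give tx = 8 * 3 + 1 = 25 and rx = 8 + 1 = 9.
 */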
1952 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1953 if (rc) {
1954 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1955 return rc;
1956 }
1957 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1958 if (rc) {
1959 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1960 return rc;
1961 }
1962
Merav Sicron51c1a582012-03-18 10:33:38 +00001963 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001964 tx, rx);
1965
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001966 return rc;
1967}
1968
Eric Dumazet1191cb82012-04-27 21:39:21 +00001969static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001970{
1971 int i;
1972
1973 for_each_queue(bp, i) {
1974 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001975 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001976
1977 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1978 if (IS_FCOE_IDX(i))
1979 /*
1980 * Although there are no IP frames expected to arrive on
1981 * this ring, we still want to add an
1982 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1983 * overrun attack.
1984 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001985 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001986 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001987 mtu = bp->dev->mtu;
1988 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1989 IP_HEADER_ALIGNMENT_PADDING +
1990 ETH_OVREHEAD +
1991 mtu +
1992 BNX2X_FW_RX_ALIGN_END;
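/* With a standard 1500 byte MTU this sum, together with NET_SKB_PAD,
 * typically fits in a single page, so rx_frag_size is set below and
 * Rx buffers can come from the page-fragment allocator instead of a
 * plain kmalloc'ed buffer.
 */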
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001993 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00001994 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1995 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1996 else
1997 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001998 }
1999}
2000
Ariel Elior60cad4e2013-09-04 14:09:22 +03002001static int bnx2x_init_rss(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002002{
2003 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002004 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2005
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002006 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002007 * enabled
2008 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002009 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2010 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00002011 bp->fp->cl_id +
2012 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002013
2014 /*
2015 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2016 * per-port, so if explicit configuration is needed, do it only
2017 * for a PMF.
2018 *
2019 * For 57712 and newer on the other hand it's a per-function
2020 * configuration.
2021 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002022 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002023}
2024
Ariel Elior60cad4e2013-09-04 14:09:22 +03002025int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2026 bool config_hash, bool enable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002027{
Yuval Mintz3b603062012-03-18 10:33:39 +00002028 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002029
2030 /* Although RSS is meaningless when there is a single HW queue we
2031 * still need it enabled in order to have HW Rx hash generated.
2032 *
2033 * if (!is_eth_multi(bp))
2034 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2035 */
2036
Dmitry Kravkov96305232012-04-03 18:41:30 +00002037 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002038
2039 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2040
Ariel Elior60cad4e2013-09-04 14:09:22 +03002041 if (enable) {
2042 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002043
Ariel Elior60cad4e2013-09-04 14:09:22 +03002044 /* RSS configuration */
2045 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2046 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2047 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2048 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2049 if (rss_obj->udp_rss_v4)
2050 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2051 if (rss_obj->udp_rss_v6)
2052 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2053 } else {
2054 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2055 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002056
Dmitry Kravkov96305232012-04-03 18:41:30 +00002057 /* Hash bits */
2058 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002059
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002060 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002061
Dmitry Kravkov96305232012-04-03 18:41:30 +00002062 if (config_hash) {
2063 /* RSS keys */
Ariel Elior60cad4e2013-09-04 14:09:22 +03002064 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
Dmitry Kravkov96305232012-04-03 18:41:30 +00002065 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002066 }
2067
Ariel Elior60cad4e2013-09-04 14:09:22 +03002068 if (IS_PF(bp))
2069 return bnx2x_config_rss(bp, &params);
2070 else
2071 return bnx2x_vfpf_config_rss(bp, &params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002072}
2073
Eric Dumazet1191cb82012-04-27 21:39:21 +00002074static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002075{
Yuval Mintz3b603062012-03-18 10:33:39 +00002076 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002077
2078 /* Prepare parameters for function state transitions */
2079 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2080
2081 func_params.f_obj = &bp->func_obj;
2082 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2083
2084 func_params.params.hw_init.load_phase = load_code;
2085
2086 return bnx2x_func_state_change(bp, &func_params);
2087}
2088
2089/*
2090 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002091 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002092 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002093void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002094{
2095 int rc;
2096 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002097 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002098 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002099
2100 /***************** Cleanup MACs' object first *************************/
2101
2102 /* Wait for completion of the requested commands */
2103 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2104 /* Perform a dry cleanup */
2105 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2106
2107 /* Clean ETH primary MAC */
2108 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002109 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002110 &ramrod_flags);
2111 if (rc != 0)
2112 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2113
2114 /* Cleanup UC list */
2115 vlan_mac_flags = 0;
2116 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2117 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2118 &ramrod_flags);
2119 if (rc != 0)
2120 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2121
2122 /***************** Now clean mcast object *****************************/
2123 rparam.mcast_obj = &bp->mcast_obj;
2124 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2125
Yuval Mintz8b09be52013-08-01 17:30:59 +03002126 /* Add a DEL command... - Since we're doing a driver cleanup only,
2127 * we take a lock surrounding both the initial send and the CONTs,
2128 * as we don't want a true completion to disrupt us in the middle.
2129 */
2130 netif_addr_lock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002131 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2132 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002133 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2134 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002135
2136 /* ...and wait until all pending commands are cleared */
2137 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2138 while (rc != 0) {
2139 if (rc < 0) {
2140 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2141 rc);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002142 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002143 return;
2144 }
2145
2146 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2147 }
Yuval Mintz8b09be52013-08-01 17:30:59 +03002148 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002149}
2150
2151#ifndef BNX2X_STOP_ON_ERROR
2152#define LOAD_ERROR_EXIT(bp, label) \
2153 do { \
2154 (bp)->state = BNX2X_STATE_ERROR; \
2155 goto label; \
2156 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002157
2158#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2159 do { \
2160 bp->cnic_loaded = false; \
2161 goto label; \
2162 } while (0)
2163#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002164#define LOAD_ERROR_EXIT(bp, label) \
2165 do { \
2166 (bp)->state = BNX2X_STATE_ERROR; \
2167 (bp)->panic = 1; \
2168 return -EBUSY; \
2169 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002170#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2171 do { \
2172 bp->cnic_loaded = false; \
2173 (bp)->panic = 1; \
2174 return -EBUSY; \
2175 } while (0)
2176#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002177
Ariel Eliorad5afc82013-01-01 05:22:26 +00002178static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002179{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002180 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2181 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2182 return;
2183}
Yuval Mintz452427b2012-03-26 20:47:07 +00002184
Ariel Eliorad5afc82013-01-01 05:22:26 +00002185static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2186{
Ariel Elior8db573b2013-01-01 05:22:37 +00002187 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002188 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002189
Ariel Eliorad5afc82013-01-01 05:22:26 +00002190 /* number of queues for statistics is number of eth queues + FCoE */
2191 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002192
Ariel Eliorad5afc82013-01-01 05:22:26 +00002193 /* Total number of FW statistics requests =
2194 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2195 * and fcoe l2 queue) stats + num of queues (which includes another 1
2196 * for fcoe l2 queue if applicable)
2197 */
2198 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2199
Ariel Elior8db573b2013-01-01 05:22:37 +00002200 /* vf stats appear in the request list, but their data is allocated by
2201 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2202 * it is used to determine where to place the vf stats queries in the
2203 * request struct
2204 */
2205 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002206 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002207
Ariel Eliorad5afc82013-01-01 05:22:26 +00002208 /* Request is built from stats_query_header and an array of
2209 * stats_query_cmd_group each of which contains
2210 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2211 * configured in the stats_query_header.
2212 */
2213 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002214 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2215 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002216 1 : 0));
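/* This is just a round-up division: e.g. 18 queries with a group size
 * of 16 (an illustrative value for STATS_QUERY_CMD_COUNT) need two
 * stats_query_cmd_group entries.
 */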
2217
Ariel Elior8db573b2013-01-01 05:22:37 +00002218 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2219 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002220 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2221 num_groups * sizeof(struct stats_query_cmd_group);
2222
2223 /* Data for statistics requests + stats_counter
2224 * stats_counter holds per-STORM counters that are incremented
2225 * when STORM has finished with the current request.
2226 * memory for FCoE offloaded statistics is counted anyway,
2227 * even if it will not be sent.
2228 * VF stats are not accounted for here as the data of VF stats is stored
2229 * in memory allocated by the VF, not here.
2230 */
2231 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2232 sizeof(struct per_pf_stats) +
2233 sizeof(struct fcoe_statistics_params) +
2234 sizeof(struct per_queue_stats) * num_queue_stats +
2235 sizeof(struct stats_counter);
2236
2237 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2238 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2239
2240 /* Set shortcuts */
2241 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2242 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2243 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2244 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2245 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2246 bp->fw_stats_req_sz;
2247
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002248 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002249 U64_HI(bp->fw_stats_req_mapping),
2250 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002251 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002252 U64_HI(bp->fw_stats_data_mapping),
2253 U64_LO(bp->fw_stats_data_mapping));
2254 return 0;
2255
2256alloc_mem_err:
2257 bnx2x_free_fw_stats_mem(bp);
2258 BNX2X_ERR("Can't allocate FW stats memory\n");
2259 return -ENOMEM;
2260}
2261
2262/* send load request to mcp and analyze response */
2263static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2264{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002265 u32 param;
2266
Ariel Eliorad5afc82013-01-01 05:22:26 +00002267 /* init fw_seq */
2268 bp->fw_seq =
2269 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2270 DRV_MSG_SEQ_NUMBER_MASK);
2271 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2272
2273 /* Get current FW pulse sequence */
2274 bp->fw_drv_pulse_wr_seq =
2275 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2276 DRV_PULSE_SEQ_MASK);
2277 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2278
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002279 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2280
2281 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2282 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2283
Ariel Eliorad5afc82013-01-01 05:22:26 +00002284 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002285 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002286
2287 /* if mcp fails to respond we must abort */
2288 if (!(*load_code)) {
2289 BNX2X_ERR("MCP response failure, aborting\n");
2290 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002291 }
2292
Ariel Eliorad5afc82013-01-01 05:22:26 +00002293 /* If mcp refused (e.g. other port is in diagnostic mode) we
2294 * must abort
2295 */
2296 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2297 BNX2X_ERR("MCP refused load request, aborting\n");
2298 return -EBUSY;
2299 }
2300 return 0;
2301}
2302
2303/* check whether another PF has already loaded FW to chip. In
2304 * virtualized environments a pf from another VM may have already
2305 * initialized the device including loading FW
2306 */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002307int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002308{
2309 /* is another pf loaded on this engine? */
2310 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2311 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2312 /* build my FW version dword */
2313 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2314 (BCM_5710_FW_MINOR_VERSION << 8) +
2315 (BCM_5710_FW_REVISION_VERSION << 16) +
2316 (BCM_5710_FW_ENGINEERING_VERSION << 24);
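/* One byte per field, major version in the low byte: an
 * illustrative 7.8.19.0 firmware would encode as 0x00130807.
 */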
2317
2318 /* read loaded FW from chip */
2319 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2320
2321 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2322 loaded_fw, my_fw);
2323
2324 /* abort nic load if version mismatch */
2325 if (my_fw != loaded_fw) {
Yuval Mintz91ebb922013-12-26 09:57:07 +02002326 if (print_err)
2327 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2328 loaded_fw, my_fw);
2329 else
2330 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2331 loaded_fw, my_fw);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002332 return -EBUSY;
2333 }
2334 }
2335 return 0;
2336}
2337
2338/* returns the "mcp load_code" according to global load_count array */
2339static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2340{
2341 int path = BP_PATH(bp);
2342
2343 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002344 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2345 bnx2x_load_count[path][2]);
2346 bnx2x_load_count[path][0]++;
2347 bnx2x_load_count[path][1 + port]++;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002348 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002349 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2350 bnx2x_load_count[path][2]);
2351 if (bnx2x_load_count[path][0] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002352 return FW_MSG_CODE_DRV_LOAD_COMMON;
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002353 else if (bnx2x_load_count[path][1 + port] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002354 return FW_MSG_CODE_DRV_LOAD_PORT;
2355 else
2356 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2357}
2358
2359/* mark PMF if applicable */
2360static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2361{
2362 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2363 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2364 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2365 bp->port.pmf = 1;
2366 /* We need the barrier to ensure the ordering between the
2367 * writing to bp->port.pmf here and reading it from the
2368 * bnx2x_periodic_task().
2369 */
2370 smp_mb();
2371 } else {
2372 bp->port.pmf = 0;
2373 }
2374
2375 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2376}
2377
2378static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2379{
2380 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2381 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2382 (bp->common.shmem2_base)) {
2383 if (SHMEM2_HAS(bp, dcc_support))
2384 SHMEM2_WR(bp, dcc_support,
2385 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2386 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2387 if (SHMEM2_HAS(bp, afex_driver_support))
2388 SHMEM2_WR(bp, afex_driver_support,
2389 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2390 }
2391
2392 /* Set AFEX default VLAN tag to an invalid value */
2393 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002394}
2395
Eric Dumazet1191cb82012-04-27 21:39:21 +00002396/**
2397 * bnx2x_bz_fp - zero content of the fastpath structure.
2398 *
2399 * @bp: driver handle
2400 * @index: fastpath index to be zeroed
2401 *
2402 * Makes sure the contents of the bp->fp[index].napi is kept
2403 * intact.
2404 */
2405static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2406{
2407 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002408 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002409 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002410 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002411
Eric Dumazet1191cb82012-04-27 21:39:21 +00002412 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002413 if (fp->tpa_info)
2414 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2415 sizeof(struct bnx2x_agg_info));
2416 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002417
2418 /* Restore the NAPI object as it has been already initialized */
2419 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002420 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002421 fp->bp = bp;
2422 fp->index = index;
2423 if (IS_ETH_FP(fp))
2424 fp->max_cos = bp->max_cos;
2425 else
2426 /* Special queues support only one CoS */
2427 fp->max_cos = 1;
2428
Merav Sicron65565882012-06-19 07:48:26 +00002429 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002430 if (IS_FCOE_FP(fp))
2431 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002432 if (IS_ETH_FP(fp))
2433 for_each_cos_in_tx_queue(fp, cos)
2434 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2435 BNX2X_NUM_ETH_QUEUES(bp) + index];
2436
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002437 /* set the tpa flag for each queue. The tpa flag determines the queue
Eric Dumazet1191cb82012-04-27 21:39:21 +00002438 * minimal size so it must be set prior to queue memory allocation
2439 */
2440 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2441 (bp->flags & GRO_ENABLE_FLAG &&
2442 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2443 if (bp->flags & TPA_ENABLE_FLAG)
2444 fp->mode = TPA_MODE_LRO;
2445 else if (bp->flags & GRO_ENABLE_FLAG)
2446 fp->mode = TPA_MODE_GRO;
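/* When both LRO and GRO are requested, LRO takes precedence; TPA is
 * left enabled for GRO only while the MTU allows HW GRO aggregation.
 */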
2447
Eric Dumazet1191cb82012-04-27 21:39:21 +00002448 /* We don't want TPA on an FCoE L2 ring */
2449 if (IS_FCOE_FP(fp))
2450 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002451}
2452
2453int bnx2x_load_cnic(struct bnx2x *bp)
2454{
2455 int i, rc, port = BP_PORT(bp);
2456
2457 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2458
2459 mutex_init(&bp->cnic_mutex);
2460
Ariel Eliorad5afc82013-01-01 05:22:26 +00002461 if (IS_PF(bp)) {
2462 rc = bnx2x_alloc_mem_cnic(bp);
2463 if (rc) {
2464 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2465 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2466 }
Merav Sicron55c11942012-11-07 00:45:48 +00002467 }
2468
2469 rc = bnx2x_alloc_fp_mem_cnic(bp);
2470 if (rc) {
2471 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2472 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2473 }
2474
2475 /* Update the number of queues with the cnic queues */
2476 rc = bnx2x_set_real_num_queues(bp, 1);
2477 if (rc) {
2478 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2479 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2480 }
2481
2482 /* Add all CNIC NAPI objects */
2483 bnx2x_add_all_napi_cnic(bp);
2484 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2485 bnx2x_napi_enable_cnic(bp);
2486
2487 rc = bnx2x_init_hw_func_cnic(bp);
2488 if (rc)
2489 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2490
2491 bnx2x_nic_init_cnic(bp);
2492
Ariel Eliorad5afc82013-01-01 05:22:26 +00002493 if (IS_PF(bp)) {
2494 /* Enable Timer scan */
2495 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002496
Ariel Eliorad5afc82013-01-01 05:22:26 +00002497 /* setup cnic queues */
2498 for_each_cnic_queue(bp, i) {
2499 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2500 if (rc) {
2501 BNX2X_ERR("Queue setup failed\n");
2502 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2503 }
Merav Sicron55c11942012-11-07 00:45:48 +00002504 }
2505 }
2506
2507 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002508 bnx2x_set_rx_mode_inner(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002509
2510 /* re-read iscsi info */
2511 bnx2x_get_iscsi_info(bp);
2512 bnx2x_setup_cnic_irq_info(bp);
2513 bnx2x_setup_cnic_info(bp);
2514 bp->cnic_loaded = true;
2515 if (bp->state == BNX2X_STATE_OPEN)
2516 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2517
Merav Sicron55c11942012-11-07 00:45:48 +00002518 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2519
2520 return 0;
2521
2522#ifndef BNX2X_STOP_ON_ERROR
2523load_error_cnic2:
2524 /* Disable Timer scan */
2525 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2526
2527load_error_cnic1:
2528 bnx2x_napi_disable_cnic(bp);
2529 /* Update the number of queues without the cnic queues */
Yuval Mintzd9d81862013-09-23 10:12:53 +03002530 if (bnx2x_set_real_num_queues(bp, 0))
Merav Sicron55c11942012-11-07 00:45:48 +00002531 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2532load_error_cnic0:
2533 BNX2X_ERR("CNIC-related load failed\n");
2534 bnx2x_free_fp_mem_cnic(bp);
2535 bnx2x_free_mem_cnic(bp);
2536 return rc;
2537#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002538}
2539
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002540/* must be called with rtnl_lock */
2541int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2542{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002543 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002544 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002545
Merav Sicron55c11942012-11-07 00:45:48 +00002546 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2547 DP(NETIF_MSG_IFUP,
2548 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2549
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002550#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002551 if (unlikely(bp->panic)) {
2552 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002553 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002554 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002555#endif
2556
2557 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2558
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002559 /* zero the structure w/o any lock, before SP handler is initialized */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002560 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2561 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2562 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002563
Ariel Eliorad5afc82013-01-01 05:22:26 +00002564 if (IS_PF(bp))
2565 /* must be called before memory allocation and HW init */
2566 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002567
Ariel Elior6383c0b2011-07-14 08:31:57 +00002568 /*
2569 * Zero fastpath structures preserving invariants like napi, which are
2570 * allocated only once, fp index, max_cos, bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002571 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002572 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002573 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002574 for_each_queue(bp, i)
2575 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002576 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2577 bp->num_cnic_queues) *
2578 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002579
Merav Sicron55c11942012-11-07 00:45:48 +00002580 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002581
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002582 /* Set the receive queues buffer size */
2583 bnx2x_set_rx_buf_size(bp);
2584
Ariel Eliorad5afc82013-01-01 05:22:26 +00002585 if (IS_PF(bp)) {
2586 rc = bnx2x_alloc_mem(bp);
2587 if (rc) {
2588 BNX2X_ERR("Unable to allocate bp memory\n");
2589 return rc;
2590 }
2591 }
2592
Ariel Eliorad5afc82013-01-01 05:22:26 +00002593 /* needs to be done after alloc mem, since it's self-adjusting to the amount
 2594 * of memory available for RSS queues
2595 */
2596 rc = bnx2x_alloc_fp_mem(bp);
2597 if (rc) {
2598 BNX2X_ERR("Unable to allocate memory for fps\n");
2599 LOAD_ERROR_EXIT(bp, load_error0);
2600 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002601
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002602 /* Allocate memory for FW statistics */
2603 if (bnx2x_alloc_fw_stats_mem(bp))
2604 LOAD_ERROR_EXIT(bp, load_error0);
2605
Ariel Elior8d9ac292013-01-01 05:22:27 +00002606 /* request pf to initialize status blocks */
2607 if (IS_VF(bp)) {
2608 rc = bnx2x_vfpf_init(bp);
2609 if (rc)
2610 LOAD_ERROR_EXIT(bp, load_error0);
2611 }
2612
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002613 /* Since bnx2x_alloc_mem() may update
2614 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002615 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002616 */
Merav Sicron55c11942012-11-07 00:45:48 +00002617 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002618 if (rc) {
2619 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002620 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002621 }
2622
Ariel Elior6383c0b2011-07-14 08:31:57 +00002623 /* configure multi cos mappings in kernel.
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002624 * this configuration may be overridden by a multi class queue
2625 * discipline or by a dcbx negotiation result.
Ariel Elior6383c0b2011-07-14 08:31:57 +00002626 */
2627 bnx2x_setup_tc(bp->dev, bp->max_cos);
2628
Merav Sicron26614ba2012-08-27 03:26:19 +00002629 /* Add all NAPI objects */
2630 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002631 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002632 bnx2x_napi_enable(bp);
2633
Ariel Eliorad5afc82013-01-01 05:22:26 +00002634 if (IS_PF(bp)) {
2635 /* set pf load just before approaching the MCP */
2636 bnx2x_set_pf_load(bp);
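		/* bnx2x_set_pf_load() marks this function as loaded so that
		 * other PFs and the parity-recovery flow can tell; it is
		 * undone by bnx2x_clear_pf_load() on the error paths below
		 * and in bnx2x_nic_unload().
		 */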
Ariel Elior889b9af2012-01-26 06:01:51 +00002637
Ariel Eliorad5afc82013-01-01 05:22:26 +00002638 /* if mcp exists send load request and analyze response */
2639 if (!BP_NOMCP(bp)) {
2640 /* attempt to load pf */
2641 rc = bnx2x_nic_load_request(bp, &load_code);
2642 if (rc)
2643 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002644
Ariel Eliorad5afc82013-01-01 05:22:26 +00002645 /* what did mcp say? */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002646 rc = bnx2x_compare_fw_ver(bp, load_code, true);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002647 if (rc) {
2648 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002649 LOAD_ERROR_EXIT(bp, load_error2);
2650 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002651 } else {
2652 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002653 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002654
Ariel Eliorad5afc82013-01-01 05:22:26 +00002655 /* mark pmf if applicable */
2656 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002657
Ariel Eliorad5afc82013-01-01 05:22:26 +00002658 /* Init Function state controlling object */
2659 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002660
Ariel Eliorad5afc82013-01-01 05:22:26 +00002661 /* Initialize HW */
2662 rc = bnx2x_init_hw(bp, load_code);
2663 if (rc) {
2664 BNX2X_ERR("HW init failed, aborting\n");
2665 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2666 LOAD_ERROR_EXIT(bp, load_error2);
2667 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002668 }
2669
Yuval Mintzecf01c22013-04-22 02:53:03 +00002670 bnx2x_pre_irq_nic_init(bp);
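	/* NIC init is split in two: whatever must be in place before IRQs
	 * are requested is done here, the remainder runs in
	 * bnx2x_post_irq_nic_init() once the interrupts are connected.
	 */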
2671
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002672 /* Connect to IRQs */
2673 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002674 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002675 BNX2X_ERR("setup irqs failed\n");
2676 if (IS_PF(bp))
2677 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002678 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002679 }
2680
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002681 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002682 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002683 /* Setup NIC internals and enable interrupts */
2684 bnx2x_post_irq_nic_init(bp, load_code);
2685
Ariel Eliorad5afc82013-01-01 05:22:26 +00002686 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002687 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002688
Ariel Eliorad5afc82013-01-01 05:22:26 +00002689 /* Set AFEX default VLAN tag to an invalid value */
2690 bp->afex_def_vlan_tag = -1;
2691 bnx2x_nic_load_afex_dcc(bp, load_code);
2692 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2693 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002694 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002695 BNX2X_ERR("Function start failed!\n");
2696 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2697
Merav Sicron55c11942012-11-07 00:45:48 +00002698 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002699 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002700
Ariel Eliorad5afc82013-01-01 05:22:26 +00002701 /* Send LOAD_DONE command to MCP */
2702 if (!BP_NOMCP(bp)) {
2703 load_code = bnx2x_fw_command(bp,
2704 DRV_MSG_CODE_LOAD_DONE, 0);
2705 if (!load_code) {
2706 BNX2X_ERR("MCP response failure, aborting\n");
2707 rc = -EBUSY;
2708 LOAD_ERROR_EXIT(bp, load_error3);
2709 }
2710 }
2711
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002712 /* initialize FW coalescing state machines in RAM */
2713 bnx2x_update_coalesce(bp);
Ariel Elior60cad4e2013-09-04 14:09:22 +03002714 }
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002715
Ariel Elior60cad4e2013-09-04 14:09:22 +03002716 /* setup the leading queue */
2717 rc = bnx2x_setup_leading(bp);
2718 if (rc) {
2719 BNX2X_ERR("Setup leading failed!\n");
2720 LOAD_ERROR_EXIT(bp, load_error3);
2721 }
2722
2723 /* set up the rest of the queues */
2724 for_each_nondefault_eth_queue(bp, i) {
2725 if (IS_PF(bp))
2726 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2727 else /* VF */
2728 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002729 if (rc) {
Ariel Elior60cad4e2013-09-04 14:09:22 +03002730 BNX2X_ERR("Queue %d setup failed\n", i);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002731 LOAD_ERROR_EXIT(bp, load_error3);
2732 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002733 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002734
Ariel Elior60cad4e2013-09-04 14:09:22 +03002735 /* setup rss */
2736 rc = bnx2x_init_rss(bp);
2737 if (rc) {
2738 BNX2X_ERR("PF RSS init failed\n");
2739 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002740 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002741
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002742 /* Now that the clients are configured we are ready to work */
2743 bp->state = BNX2X_STATE_OPEN;
2744
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002745 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002746 if (IS_PF(bp))
2747 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002748 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002749 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2750 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002751 if (rc) {
2752 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002753 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002754 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002755
Ariel Eliorad5afc82013-01-01 05:22:26 +00002756 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002757 bnx2x_update_max_mf_config(bp, bp->pending_max);
2758 bp->pending_max = 0;
2759 }
2760
Ariel Eliorad5afc82013-01-01 05:22:26 +00002761 if (bp->port.pmf) {
2762 rc = bnx2x_initial_phy_init(bp, load_mode);
2763 if (rc)
2764 LOAD_ERROR_EXIT(bp, load_error3);
2765 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002766 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002767
2768 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002769
2770 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002771 bnx2x_set_rx_mode_inner(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002772
2773 /* Start the Tx */
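	/* LOAD_OPEN starts the Tx queues from scratch (regular ifup),
	 * LOAD_NORMAL only re-enables them (reload paths such as an MTU
	 * change), while LOAD_DIAG / LOAD_LOOPBACK_EXT leave the device in
	 * a diagnostics state, e.g. for the ethtool self-test.
	 */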
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002774 switch (load_mode) {
2775 case LOAD_NORMAL:
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002776 /* Tx queue should be only re-enabled */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002777 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002778 break;
2779
2780 case LOAD_OPEN:
2781 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002782 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002783 break;
2784
2785 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002786 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002787 bp->state = BNX2X_STATE_DIAG;
2788 break;
2789
2790 default:
2791 break;
2792 }
2793
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002794 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002795 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002796 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002797 bnx2x__link_status_update(bp);
2798
2799 /* start the timer */
2800 mod_timer(&bp->timer, jiffies + bp->current_interval);
2801
Merav Sicron55c11942012-11-07 00:45:48 +00002802 if (CNIC_ENABLED(bp))
2803 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002804
Ariel Eliorad5afc82013-01-01 05:22:26 +00002805 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2806 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002807 u32 val;
2808 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2809 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2810 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2811 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2812 }
2813
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002814 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002815 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002816 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002817 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002818 return -EBUSY;
2819 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002820
Barak Witkowski98768792012-06-19 07:48:31 +00002821 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2822 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2823 bnx2x_dcbx_init(bp, false);
2824
Merav Sicron55c11942012-11-07 00:45:48 +00002825 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2826
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002827 return 0;
2828
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002829#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002830load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002831 if (IS_PF(bp)) {
2832 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002833
Ariel Eliorad5afc82013-01-01 05:22:26 +00002834 /* Clean queueable objects */
2835 bnx2x_squeeze_objects(bp);
2836 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002837
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002838 /* Free SKBs, SGEs, TPA pool and driver internals */
2839 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002840 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002841 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002842
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002843 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002844 bnx2x_free_irq(bp);
2845load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002846 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002847 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2848 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2849 }
2850
2851 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002852load_error1:
2853 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002854 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002855
Ariel Elior889b9af2012-01-26 06:01:51 +00002856 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002857 if (IS_PF(bp))
2858 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002859load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002860 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002861 bnx2x_free_fp_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002862 bnx2x_free_mem(bp);
2863
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002865#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002866}
2867
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002868int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002869{
2870 u8 rc = 0, cos, i;
2871
2872 /* Wait until tx fastpath tasks complete */
2873 for_each_tx_queue(bp, i) {
2874 struct bnx2x_fastpath *fp = &bp->fp[i];
2875
2876 for_each_cos_in_tx_queue(fp, cos)
2877 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2878 if (rc)
2879 return rc;
2880 }
2881 return 0;
2882}
2883
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002884/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002885int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002886{
2887 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002888 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002889
Merav Sicron55c11942012-11-07 00:45:48 +00002890 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2891
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002892 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002893 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002894 u32 val;
2895 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2896 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2897 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2898 }
2899
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002900 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002901 (bp->state == BNX2X_STATE_CLOSED ||
2902 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002903 /* We can get here if the driver has been unloaded
2904 * during parity error recovery and is either waiting for a
2905 * leader to complete or for other functions to unload and
2906 * then ifdown has been issued. In this case we want to
2907 * unload and let other functions to complete a recovery
2908 * process.
2909 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002910 bp->recovery_state = BNX2X_RECOVERY_DONE;
2911 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002912 bnx2x_release_leader_lock(bp);
2913 smp_mb();
2914
Merav Sicron51c1a582012-03-18 10:33:38 +00002915 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2916 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002917 return -EINVAL;
2918 }
2919
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002920 /* Nothing to do during unload if previous bnx2x_nic_load()
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002921 * has not completed successfully - all resources are released.
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002922 *
 2923 * We can get here only after an unsuccessful ndo_* callback, during which
 2924 * the dev->IFF_UP flag is still on.
2925 */
2926 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2927 return 0;
2928
2929 /* It's important to set the bp->state to the value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002930 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2931 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2932 */
2933 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2934 smp_mb();
2935
Ariel Elior78c3bcc2013-06-20 17:39:08 +03002936 /* indicate to VFs that the PF is going down */
2937 bnx2x_iov_channel_down(bp);
2938
Merav Sicron55c11942012-11-07 00:45:48 +00002939 if (CNIC_LOADED(bp))
2940 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2941
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002942 /* Stop Tx */
2943 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002944 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002945
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002946 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002947
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002948 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002949
Ariel Eliorad5afc82013-01-01 05:22:26 +00002950 if (IS_PF(bp)) {
2951 /* Set ALWAYS_ALIVE bit in shmem */
2952 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2953 bnx2x_drv_pulse(bp);
2954 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2955 bnx2x_save_statistics(bp);
2956 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002957
Ariel Eliorad5afc82013-01-01 05:22:26 +00002958 /* wait till consumers catch up with producers in all queues */
2959 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002960
Ariel Elior9b176b62013-01-01 05:22:28 +00002961 /* if VF, indicate to PF that this function is going down (PF will delete sp
 2962 * elements and clear initializations)
2963 */
2964 if (IS_VF(bp))
2965 bnx2x_vfpf_close_vf(bp);
2966 else if (unload_mode != UNLOAD_RECOVERY)
2967 /* if this is a normal/close unload need to clean up chip*/
Yuval Mintz5d07d862012-09-13 02:56:21 +00002968 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002969 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002970 /* Send the UNLOAD_REQUEST to the MCP */
2971 bnx2x_send_unload_req(bp, unload_mode);
2972
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002973 /* Prevent transactions to host from the functions on the
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002974 * engine that doesn't reset global blocks in case of global
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002975 * attention once global blocks are reset and gates are opened
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002976 * (the engine whose leader will perform the recovery
2977 * last).
2978 */
2979 if (!CHIP_IS_E1x(bp))
2980 bnx2x_pf_disable(bp);
2981
2982 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002983 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002984 /* Delete all NAPI objects */
2985 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002986 if (CNIC_LOADED(bp))
2987 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002988 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002989 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002990
2991 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002992 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002993 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002994
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002995 /*
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002996 * At this stage no more interrupts will arrive so we may safely clean
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002997 * the queueable objects here in case they failed to get cleaned so far.
2998 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002999 if (IS_PF(bp))
3000 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003001
Vladislav Zolotarov79616892011-07-21 07:58:54 +00003002 /* There should be no more pending SP commands at this stage */
3003 bp->sp_state = 0;
3004
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003005 bp->port.pmf = 0;
3006
Dmitry Kravkova0d307b2013-11-17 08:59:26 +02003007 /* clear pending work in rtnl task */
3008 bp->sp_rtnl_state = 0;
3009 smp_mb();
3010
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003011 /* Free SKBs, SGEs, TPA pool and driver internals */
3012 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003013 if (CNIC_LOADED(bp))
3014 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003015 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003016 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003017
Ariel Eliorad5afc82013-01-01 05:22:26 +00003018 bnx2x_free_fp_mem(bp);
3019 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00003020 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003021
Ariel Eliorad5afc82013-01-01 05:22:26 +00003022 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00003023 if (CNIC_LOADED(bp))
3024 bnx2x_free_mem_cnic(bp);
3025 }
Ariel Eliorb4cddbd2013-08-28 01:13:03 +03003026 bnx2x_free_mem(bp);
3027
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003028 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00003029 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003030
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003031 /* Check if there are pending parity attentions. If there are - set
3032 * RECOVERY_IN_PROGRESS.
3033 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003034 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003035 bnx2x_set_reset_in_progress(bp);
3036
3037 /* Set RESET_IS_GLOBAL if needed */
3038 if (global)
3039 bnx2x_set_reset_global(bp);
3040 }
3041
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003042 /* The last driver must disable the "close the gate" mechanism if there is no
3043 * parity attention or "process kill" pending.
3044 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003045 if (IS_PF(bp) &&
3046 !bnx2x_clear_pf_load(bp) &&
3047 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003048 bnx2x_disable_close_the_gate(bp);
3049
Merav Sicron55c11942012-11-07 00:45:48 +00003050 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3051
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003052 return 0;
3053}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003054
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003055int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3056{
3057 u16 pmcsr;
3058
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003059 /* If there is no power capability, silently succeed */
Jon Mason29ed74c2013-09-11 11:22:39 -07003060 if (!bp->pdev->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003061 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003062 return 0;
3063 }
3064
Jon Mason29ed74c2013-09-11 11:22:39 -07003065 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003066
3067 switch (state) {
3068 case PCI_D0:
Jon Mason29ed74c2013-09-11 11:22:39 -07003069 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003070 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3071 PCI_PM_CTRL_PME_STATUS));
3072
3073 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3074 /* delay required during transition out of D3hot */
3075 msleep(20);
3076 break;
3077
3078 case PCI_D3hot:
 3079 /* If there are other clients above, don't
 3080 * shut down the power */
3081 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3082 return 0;
3083 /* Don't shut down the power for emulation and FPGA */
3084 if (CHIP_REV_IS_SLOW(bp))
3085 return 0;
3086
3087 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
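		/* 3 is the D3hot encoding of the PCI_PM_CTRL power state field */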
3088 pmcsr |= 3;
3089
3090 if (bp->wol)
3091 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3092
Jon Mason29ed74c2013-09-11 11:22:39 -07003093 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003094 pmcsr);
3095
3096 /* No more memory access after this point until
3097 * device is brought back to D0.
3098 */
3099 break;
3100
3101 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003102 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003103 return -EINVAL;
3104 }
3105 return 0;
3106}
3107
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003108/*
3109 * net_device service functions
3110 */
stephen hemmingera8f47eb2014-01-09 22:20:11 -08003111static int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003112{
3113 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003114 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003115 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3116 napi);
3117 struct bnx2x *bp = fp->bp;
3118
3119 while (1) {
3120#ifdef BNX2X_STOP_ON_ERROR
3121 if (unlikely(bp->panic)) {
3122 napi_complete(napi);
3123 return 0;
3124 }
3125#endif
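		/* The per-queue lock arbitrates between this NAPI context and
		 * the busy-poll path; if a busy-polling socket currently owns
		 * the ring, back off and report the work done so far.
		 */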
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003126 if (!bnx2x_fp_lock_napi(fp))
3127 return work_done;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003128
Ariel Elior6383c0b2011-07-14 08:31:57 +00003129 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003130 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3131 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003132
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003133 if (bnx2x_has_rx_work(fp)) {
3134 work_done += bnx2x_rx_int(fp, budget - work_done);
3135
3136 /* must not complete if we consumed full budget */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003137 if (work_done >= budget) {
3138 bnx2x_fp_unlock_napi(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003139 break;
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003140 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003141 }
3142
3143 /* Fall out from the NAPI loop if needed */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003144 if (!bnx2x_fp_unlock_napi(fp) &&
3145 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003146
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003147 /* No need to update SB for FCoE L2 ring as long as
3148 * it's connected to the default SB and the SB
3149 * has been updated when NAPI was scheduled.
3150 */
3151 if (IS_FCOE_FP(fp)) {
3152 napi_complete(napi);
3153 break;
3154 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003155 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003156 /* bnx2x_has_rx_work() reads the status block,
3157 * thus we need to ensure that status block indices
3158 * have been actually read (bnx2x_update_fpsb_idx)
3159 * prior to this check (bnx2x_has_rx_work) so that
3160 * we won't write the "newer" value of the status block
3161 * to IGU (if there was a DMA right after
3162 * bnx2x_has_rx_work and if there is no rmb, the memory
3163 * reading (bnx2x_update_fpsb_idx) may be postponed
3164 * to right before bnx2x_ack_sb). In this case there
3165 * will never be another interrupt until there is
3166 * another update of the status block, while there
3167 * is still unhandled work.
3168 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003169 rmb();
3170
3171 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3172 napi_complete(napi);
3173 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003174 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003175 "Update index to %d\n", fp->fp_hc_idx);
3176 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3177 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003178 IGU_INT_ENABLE, 1);
3179 break;
3180 }
3181 }
3182 }
3183
3184 return work_done;
3185}
3186
Cong Wange0d10952013-08-01 11:10:25 +08003187#ifdef CONFIG_NET_RX_BUSY_POLL
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003188/* must be called with local_bh_disable()d */
3189int bnx2x_low_latency_recv(struct napi_struct *napi)
3190{
3191 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3192 napi);
3193 struct bnx2x *bp = fp->bp;
3194 int found = 0;
3195
3196 if ((bp->state == BNX2X_STATE_CLOSED) ||
3197 (bp->state == BNX2X_STATE_ERROR) ||
3198 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3199 return LL_FLUSH_FAILED;
3200
3201 if (!bnx2x_fp_lock_poll(fp))
3202 return LL_FLUSH_BUSY;
3203
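	/* Poll with a small fixed budget so the busy-polling caller returns
	 * quickly; anything left over is handled by the regular NAPI poll.
	 */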
Dmitry Kravkov75b29452013-06-19 01:36:05 +03003204 if (bnx2x_has_rx_work(fp))
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003205 found = bnx2x_rx_int(fp, 4);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003206
3207 bnx2x_fp_unlock_poll(fp);
3208
3209 return found;
3210}
3211#endif
3212
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003213/* we split the first BD into headers and data BDs
 3214 * to ease the pain of our fellow microcode engineers;
 3215 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003216 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003217static u16 bnx2x_tx_split(struct bnx2x *bp,
3218 struct bnx2x_fp_txdata *txdata,
3219 struct sw_tx_bd *tx_buf,
3220 struct eth_tx_start_bd **tx_bd, u16 hlen,
3221 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003222{
3223 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3224 struct eth_tx_bd *d_tx_bd;
3225 dma_addr_t mapping;
3226 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3227
3228 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003229 h_tx_bd->nbytes = cpu_to_le16(hlen);
3230
Dmitry Kravkov91226792013-03-11 05:17:52 +00003231 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3232 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003233
3234 /* now get a new data BD
3235 * (after the pbd) and fill it */
3236 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003237 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003238
3239 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3240 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
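	/* The new data BD reuses the header BD's DMA mapping, offset by hlen;
	 * only the original BD owns the mapping, hence BNX2X_TSO_SPLIT_BD
	 * being set below.
	 */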
3241
3242 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3243 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3244 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3245
3246 /* this marks the BD as one that has no individual mapping */
3247 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3248
3249 DP(NETIF_MSG_TX_QUEUED,
3250 "TSO split data size is %d (%x:%x)\n",
3251 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3252
3253 /* update tx_bd */
3254 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3255
3256 return bd_prod;
3257}
3258
Yuval Mintz86564c32013-01-23 03:21:50 +00003259#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3260#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003261static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003262{
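	/* Remove (fix > 0) or add (fix < 0) the checksum contribution of the
	 * bytes around the transport header start, so that the checksum
	 * written into the parsing BD matches the header boundary the HW
	 * parses; fold and byte-swap the result for the BD.
	 */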
Yuval Mintz86564c32013-01-23 03:21:50 +00003263 __sum16 tsum = (__force __sum16) csum;
3264
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003265 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003266 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3267 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003268
3269 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003270 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3271 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003272
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003273 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003274}
3275
Dmitry Kravkov91226792013-03-11 05:17:52 +00003276static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003277{
3278 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003279 __u8 prot = 0;
3280 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003281
3282 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003283 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003284
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003285 protocol = vlan_get_protocol(skb);
3286 if (protocol == htons(ETH_P_IPV6)) {
3287 rc = XMIT_CSUM_V6;
3288 prot = ipv6_hdr(skb)->nexthdr;
3289 } else {
3290 rc = XMIT_CSUM_V4;
3291 prot = ip_hdr(skb)->protocol;
3292 }
3293
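	/* Encapsulated packets: 57712 and newer can also offload the inner
	 * headers, so classify them with the XMIT_CSUM_ENC_* flags as well.
	 */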
3294 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3295 if (inner_ip_hdr(skb)->version == 6) {
3296 rc |= XMIT_CSUM_ENC_V6;
3297 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003298 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003299 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003300 rc |= XMIT_CSUM_ENC_V4;
3301 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003302 rc |= XMIT_CSUM_TCP;
3303 }
3304 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003305 if (prot == IPPROTO_TCP)
3306 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003307
Eric Dumazet36a8f392013-09-29 01:21:32 -07003308 if (skb_is_gso(skb)) {
3309 if (skb_is_gso_v6(skb)) {
3310 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3311 if (rc & XMIT_CSUM_ENC)
3312 rc |= XMIT_GSO_ENC_V6;
3313 } else {
3314 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3315 if (rc & XMIT_CSUM_ENC)
3316 rc |= XMIT_GSO_ENC_V4;
3317 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003318 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003319
3320 return rc;
3321}
3322
3323#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 3324/* check if the packet requires linearization (packet is too fragmented);
 3325 no need to check fragmentation if page size > 8K (there will be no
 3326 violation of FW restrictions) */
3327static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3328 u32 xmit_type)
3329{
3330 int to_copy = 0;
3331 int hlen = 0;
3332 int first_bd_sz = 0;
3333
3334 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3335 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3336
3337 if (xmit_type & XMIT_GSO) {
3338 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3339 /* Check if LSO packet needs to be copied:
3340 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3341 int wnd_size = MAX_FETCH_BD - 3;
3342 /* Number of windows to check */
3343 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3344 int wnd_idx = 0;
3345 int frag_idx = 0;
3346 u32 wnd_sum = 0;
3347
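			/* Sliding-window check: every window of wnd_size
			 * consecutive BDs (the linear part counts for the
			 * first window) must carry at least one MSS of
			 * payload; if not, the skb is linearized so the FW
			 * can build each segment from at most MAX_FETCH_BD
			 * BDs.
			 */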
3348 /* Headers length */
3349 hlen = (int)(skb_transport_header(skb) - skb->data) +
3350 tcp_hdrlen(skb);
3351
3352 /* Amount of data (w/o headers) on linear part of SKB*/
3353 first_bd_sz = skb_headlen(skb) - hlen;
3354
3355 wnd_sum = first_bd_sz;
3356
3357 /* Calculate the first sum - it's special */
3358 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3359 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003360 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003361
3362 /* If there was data on linear skb data - check it */
3363 if (first_bd_sz > 0) {
3364 if (unlikely(wnd_sum < lso_mss)) {
3365 to_copy = 1;
3366 goto exit_lbl;
3367 }
3368
3369 wnd_sum -= first_bd_sz;
3370 }
3371
3372 /* Others are easier: run through the frag list and
3373 check all windows */
3374 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3375 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003376 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003377
3378 if (unlikely(wnd_sum < lso_mss)) {
3379 to_copy = 1;
3380 break;
3381 }
3382 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003383 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003384 }
3385 } else {
3386 /* in non-LSO too fragmented packet should always
3387 be linearized */
3388 to_copy = 1;
3389 }
3390 }
3391
3392exit_lbl:
3393 if (unlikely(to_copy))
3394 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003395 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003396 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3397 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3398
3399 return to_copy;
3400}
3401#endif
3402
Dmitry Kravkov91226792013-03-11 05:17:52 +00003403static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3404 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003405{
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003406 struct ipv6hdr *ipv6;
3407
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003408 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3409 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3410 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003411
3412 if (xmit_type & XMIT_GSO_ENC_V6)
3413 ipv6 = inner_ipv6_hdr(skb);
3414 else if (xmit_type & XMIT_GSO_V6)
3415 ipv6 = ipv6_hdr(skb);
3416 else
3417 ipv6 = NULL;
3418
3419 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003420 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003421}
3422
3423/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003424 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003425 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003426 * @skb: packet skb
3427 * @pbd: parse BD
3428 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003429 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003430static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3431 struct eth_tx_parse_bd_e1x *pbd,
Yuval Mintz057cf652013-05-19 04:41:01 +00003432 struct eth_tx_start_bd *tx_start_bd,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003433 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003434{
3435 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003436 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003437 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003438
3439 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003440 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003441 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003442 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3443 ip_hdr(skb)->daddr,
3444 0, IPPROTO_TCP, 0));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003445
Yuval Mintz057cf652013-05-19 04:41:01 +00003446 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3447 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3448 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003449 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003450 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3451 &ipv6_hdr(skb)->daddr,
3452 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003453 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003454
Yuval Mintz86564c32013-01-23 03:21:50 +00003455 pbd->global_data |=
3456 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003457}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003458
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003459/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003460 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3461 *
3462 * @bp: driver handle
3463 * @skb: packet skb
3464 * @parsing_data: data to be updated
3465 * @xmit_type: xmit flags
3466 *
3467 * 57712/578xx related, when skb has encapsulation
3468 */
3469static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3470 u32 *parsing_data, u32 xmit_type)
3471{
3472 *parsing_data |=
3473 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3474 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3475 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3476
3477 if (xmit_type & XMIT_CSUM_TCP) {
3478 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3479 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3480 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3481
3482 return skb_inner_transport_header(skb) +
3483 inner_tcp_hdrlen(skb) - skb->data;
3484 }
3485
3486 /* We support checksum offload for TCP and UDP only.
3487 * No need to pass the UDP header length - it's a constant.
3488 */
3489 return skb_inner_transport_header(skb) +
3490 sizeof(struct udphdr) - skb->data;
3491}
3492
3493/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003494 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003495 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003496 * @bp: driver handle
3497 * @skb: packet skb
3498 * @parsing_data: data to be updated
3499 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003500 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003501 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003502 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003503static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3504 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003505{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003506 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003507 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003508 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3509 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003510
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003511 if (xmit_type & XMIT_CSUM_TCP) {
3512 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3513 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3514 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003515
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003516 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003517 }
3518 /* We support checksum offload for TCP and UDP only.
3519 * No need to pass the UDP header length - it's a constant.
3520 */
3521 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003522}
3523
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003524/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003525static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3526 struct eth_tx_start_bd *tx_start_bd,
3527 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003528{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003529 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3530
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003531 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003532 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003533
3534 if (!(xmit_type & XMIT_CSUM_TCP))
3535 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003536}
3537
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003538/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003539 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003540 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003541 * @bp: driver handle
3542 * @skb: packet skb
3543 * @pbd: parse BD to be updated
3544 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003545 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003546static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3547 struct eth_tx_parse_bd_e1x *pbd,
3548 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003549{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003550 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003551
3552 /* for now NS flag is not used in Linux */
3553 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003554 cpu_to_le16(hlen |
3555 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3556 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003557
3558 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003559 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003560
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003561 hlen += pbd->ip_hlen_w;
3562
3563 /* We support checksum offload for TCP and UDP only */
3564 if (xmit_type & XMIT_CSUM_TCP)
3565 hlen += tcp_hdrlen(skb) / 2;
3566 else
3567 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003568
3569 pbd->total_hlen_w = cpu_to_le16(hlen);
3570 hlen = hlen*2;
3571
3572 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003573 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003574
3575 } else {
3576 s8 fix = SKB_CS_OFF(skb); /* signed! */
3577
3578 DP(NETIF_MSG_TX_QUEUED,
3579 "hlen %d fix %d csum before fix %x\n",
3580 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3581
3582 /* HW bug: fixup the CSUM */
3583 pbd->tcp_pseudo_csum =
3584 bnx2x_csum_fix(skb_transport_header(skb),
3585 SKB_CS(skb), fix);
3586
3587 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3588 pbd->tcp_pseudo_csum);
3589 }
3590
3591 return hlen;
3592}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003593
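/* Fill the 2nd parsing BD and the tunnel fields of the e2 parsing BD for
 * encapsulated GSO packets: header lengths are passed in 16-bit words and
 * the pseudo checksum is precomputed over the inner TCP/IP headers.
 */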
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003594static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3595 struct eth_tx_parse_bd_e2 *pbd_e2,
3596 struct eth_tx_parse_2nd_bd *pbd2,
3597 u16 *global_data,
3598 u32 xmit_type)
3599{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003600 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003601 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003602
Dmitry Kravkove287a752013-03-21 15:38:24 +00003603 /* from outer IP to transport */
3604 hlen_w = (skb_inner_transport_header(skb) -
3605 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003606
3607 /* transport len */
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003608 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003609
Dmitry Kravkove287a752013-03-21 15:38:24 +00003610 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003611
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003612 /* outer IP header info */
3613 if (xmit_type & XMIT_CSUM_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003614 struct iphdr *iph = ip_hdr(skb);
Dmitry Kravkov1b4fc0e2013-07-11 15:48:21 +03003615 u32 csum = (__force u32)(~iph->check) -
3616 (__force u32)iph->tot_len -
3617 (__force u32)iph->frag_off;
Yuval Mintzc957d092013-06-25 08:50:11 +03003618
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003619 pbd2->fw_ip_csum_wo_len_flags_frag =
Yuval Mintzc957d092013-06-25 08:50:11 +03003620 bswab16(csum_fold((__force __wsum)csum));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003621 } else {
3622 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003623 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003624 }
3625
3626 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3627
3628 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3629
3630 if (xmit_type & XMIT_GSO_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003631 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003632
3633 pbd_e2->data.tunnel_data.pseudo_csum =
3634 bswab16(~csum_tcpudp_magic(
3635 inner_ip_hdr(skb)->saddr,
3636 inner_ip_hdr(skb)->daddr,
3637 0, IPPROTO_TCP, 0));
3638
3639 outerip_len = ip_hdr(skb)->ihl << 1;
3640 } else {
3641 pbd_e2->data.tunnel_data.pseudo_csum =
3642 bswab16(~csum_ipv6_magic(
3643 &inner_ipv6_hdr(skb)->saddr,
3644 &inner_ipv6_hdr(skb)->daddr,
3645 0, IPPROTO_TCP, 0));
3646 }
3647
3648 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3649
3650 *global_data |=
3651 outerip_off |
3652 (!!(xmit_type & XMIT_CSUM_V6) <<
3653 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3654 (outerip_len <<
3655 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3656 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3657 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003658
3659 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3660 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3661 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3662 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003663}
3664
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003665/* called with netif_tx_lock
3666 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3667 * netif_wake_queue()
3668 */
3669netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3670{
3671 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003672
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003673 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003674 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003675 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003676 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003677 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003678 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003679 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003680 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003681 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003682 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003683 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003684 dma_addr_t mapping;
3685 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3686 int i;
3687 u8 hlen = 0;
3688 __le16 pkt_size = 0;
3689 struct ethhdr *eth;
3690 u8 mac_type = UNICAST_ADDRESS;
3691
3692#ifdef BNX2X_STOP_ON_ERROR
3693 if (unlikely(bp->panic))
3694 return NETDEV_TX_BUSY;
3695#endif
3696
Ariel Elior6383c0b2011-07-14 08:31:57 +00003697 txq_index = skb_get_queue_mapping(skb);
3698 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003699
Merav Sicron55c11942012-11-07 00:45:48 +00003700 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003701
Merav Sicron65565882012-06-19 07:48:26 +00003702 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003703
3704 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003705 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003706 txq_index, fp_index, txdata_index); */
3707
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003708 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003709 DP(NETIF_MSG_TX_QUEUED,
3710 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003711 txdata->cid, fp_index, txdata_index, txdata, fp); */
3712
3713 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003714 skb_shinfo(skb)->nr_frags +
3715 BDS_PER_TX_PKT +
3716 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003717 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003718 if (txdata->tx_ring_size == 0) {
3719 struct bnx2x_eth_q_stats *q_stats =
3720 bnx2x_fp_qstats(bp, txdata->parent_fp);
3721 q_stats->driver_filtered_tx_pkt++;
3722 dev_kfree_skb(skb);
3723 return NETDEV_TX_OK;
3724 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003725 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3726 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003727 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003728
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003729 return NETDEV_TX_BUSY;
3730 }
3731
Merav Sicron51c1a582012-03-18 10:33:38 +00003732 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003733 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003734 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003735 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3736 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003737
3738 eth = (struct ethhdr *)skb->data;
3739
3740 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3741 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3742 if (is_broadcast_ether_addr(eth->h_dest))
3743 mac_type = BROADCAST_ADDRESS;
3744 else
3745 mac_type = MULTICAST_ADDRESS;
3746 }
3747
Dmitry Kravkov91226792013-03-11 05:17:52 +00003748#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003749 /* First, check if we need to linearize the skb (due to FW
3750 restrictions). No need to check fragmentation if page size > 8K
 3751 (there will be no violation of FW restrictions) */
3752 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3753 /* Statistics of linearization */
3754 bp->lin_cnt++;
3755 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003756 DP(NETIF_MSG_TX_QUEUED,
3757 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003758 dev_kfree_skb_any(skb);
3759 return NETDEV_TX_OK;
3760 }
3761 }
3762#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003763 /* Map skb linear data for DMA */
3764 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3765 skb_headlen(skb), DMA_TO_DEVICE);
3766 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003767 DP(NETIF_MSG_TX_QUEUED,
3768 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003769 dev_kfree_skb_any(skb);
3770 return NETDEV_TX_OK;
3771 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003772 /*
3773 Please read carefully. First we use one BD which we mark as start,
3774 then we have a parsing info BD (used for TSO or xsum),
3775 and only then we have the rest of the TSO BDs.
3776 (don't forget to mark the last one as last,
3777 and to unmap only AFTER you write to the BD ...)
3778 And above all, all PBD sizes are in words - NOT DWORDS!
3779 */
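	/* Illustrative BD chain for one packet (a sketch, not taken from
	 * the firmware documentation):
	 *
	 *   start BD (headers) -> parsing BD -> [2nd parsing BD if tunnel]
	 *     -> data BD for the rest of the linear part (TSO header split)
	 *     -> one data BD per page fragment
	 *
	 * first_bd->nbd must count all of these, plus any "next page" BD
	 * the chain crosses.
	 */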
3780
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003781 /* get current pkt produced now - advance it just before sending packet
3782 * since mapping of pages may fail and cause packet to be dropped
3783 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003784 pkt_prod = txdata->tx_pkt_prod;
3785 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003786
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003787 /* get a tx_buf and first BD
3788 * tx_start_bd may be changed during SPLIT,
3789 * but first_bd will always stay first
3790 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003791 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3792 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003793 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003794
3795 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003796
Dmitry Kravkov91226792013-03-11 05:17:52 +00003797 /* header nbd: indirectly zero other flags! */
3798 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003799
3800 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003801 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003802 tx_buf->skb = skb;
3803 tx_buf->flags = 0;
3804
3805 DP(NETIF_MSG_TX_QUEUED,
3806 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003807 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003808
Jesse Grosseab6d182010-10-20 13:56:03 +00003809 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003810 tx_start_bd->vlan_or_ethertype =
3811 cpu_to_le16(vlan_tx_tag_get(skb));
3812 tx_start_bd->bd_flags.as_bitfield |=
3813 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003814 } else {
3815 /* when transmitting from a VF, the start BD must hold the
3816 * ethertype so the FW can enforce it
3817 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003818 if (IS_VF(bp))
Ariel Eliordc1ba592013-01-01 05:22:30 +00003819 tx_start_bd->vlan_or_ethertype =
3820 cpu_to_le16(ntohs(eth->h_proto));
Dmitry Kravkov91226792013-03-11 05:17:52 +00003821 else
Ariel Eliordc1ba592013-01-01 05:22:30 +00003822 /* used by FW for packet accounting */
3823 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003824 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003825
Dmitry Kravkov91226792013-03-11 05:17:52 +00003826 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3827
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003828 /* turn on parsing and get a BD */
3829 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003830
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003831 if (xmit_type & XMIT_CSUM)
3832 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003833
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003834 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003835 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003836 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003837
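		/* For encapsulated traffic a second parsing BD is built
		 * below with the inner header offsets, and the start BD is
		 * flagged so the FW knows a tunnel header is present (a
		 * summary note; see the flags set further down).
		 */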
3838 if (xmit_type & XMIT_CSUM_ENC) {
3839 u16 global_data = 0;
3840
3841 /* Set PBD in enc checksum offload case */
3842 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3843 &pbd_e2_parsing_data,
3844 xmit_type);
3845
3846 /* turn on 2nd parsing and get a BD */
3847 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3848
3849 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3850
3851 memset(pbd2, 0, sizeof(*pbd2));
3852
3853 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3854 (skb_inner_network_header(skb) -
3855 skb->data) >> 1;
3856
3857 if (xmit_type & XMIT_GSO_ENC)
3858 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3859 &global_data,
3860 xmit_type);
3861
3862 pbd2->global_data = cpu_to_le16(global_data);
3863
3864 /* add an additional parsing BD indication to the start BD */
3865 SET_FLAG(tx_start_bd->general_data,
3866 ETH_TX_START_BD_PARSE_NBDS, 1);
3867 /* set encapsulation flag in start BD */
3868 SET_FLAG(tx_start_bd->general_data,
3869 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3870 nbd++;
3871 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00003872 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003873 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3874 &pbd_e2_parsing_data,
3875 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003876 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00003877
Yuval Mintzbabe7232014-02-27 15:42:26 +02003878 /* Add the MACs to the parsing BD if this is a VF or if
3879 * Tx switching is enabled.
3880 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003881 if (IS_VF(bp)) {
3882 /* override GRE parameters in BD */
3883 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3884 &pbd_e2->data.mac_addr.src_mid,
3885 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003886 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003887
3888 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3889 &pbd_e2->data.mac_addr.dst_mid,
3890 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003891 eth->h_dest);
Yuval Mintzbabe7232014-02-27 15:42:26 +02003892 } else if (bp->flags & TX_SWITCHING) {
3893 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3894 &pbd_e2->data.mac_addr.dst_mid,
3895 &pbd_e2->data.mac_addr.dst_lo,
3896 eth->h_dest);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003897 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003898
3899 SET_FLAG(pbd_e2_parsing_data,
3900 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003901 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003902 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003903 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003904 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3905 /* Set PBD in checksum offload case */
3906 if (xmit_type & XMIT_CSUM)
3907 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003908
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003909 SET_FLAG(global_data,
3910 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3911 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003912 }
3913
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003914 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003915 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3916 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003917 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3918 pkt_size = tx_start_bd->nbytes;
3919
Merav Sicron51c1a582012-03-18 10:33:38 +00003920 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003921 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003922 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003923 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003924 tx_start_bd->bd_flags.as_bitfield,
3925 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003926
3927 if (xmit_type & XMIT_GSO) {
3928
3929 DP(NETIF_MSG_TX_QUEUED,
3930 "TSO packet len %d hlen %d total len %d tso size %d\n",
3931 skb->len, hlen, skb_headlen(skb),
3932 skb_shinfo(skb)->gso_size);
3933
3934 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3935
Dmitry Kravkov91226792013-03-11 05:17:52 +00003936 if (unlikely(skb_headlen(skb) > hlen)) {
3937 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003938 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3939 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003940 bd_prod);
3941 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003942 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003943 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3944 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003945 else
Yuval Mintz44dbc782013-06-03 02:59:57 +00003946 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003947 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003948
3949 /* Set the PBD's parsing_data field if not zero
3950 * (for the chips newer than 57711).
3951 */
3952 if (pbd_e2_parsing_data)
3953 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3954
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003955 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3956
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003957 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003958 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3959 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3960
Eric Dumazet9e903e02011-10-18 21:00:24 +00003961 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3962 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003963 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003964 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003965
Merav Sicron51c1a582012-03-18 10:33:38 +00003966 DP(NETIF_MSG_TX_QUEUED,
3967 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003968
3969 /* we need unmap all buffers already mapped
3970 * for this SKB;
3971 * first_bd->nbd need to be properly updated
3972 * before call to bnx2x_free_tx_pkt
3973 */
3974 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003975 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003976 TX_BD(txdata->tx_pkt_prod),
3977 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003978 return NETDEV_TX_OK;
3979 }
3980
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003981 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003982 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003983 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003984 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003985
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003986 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3987 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003988 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3989 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003990 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003991
3992 DP(NETIF_MSG_TX_QUEUED,
3993 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3994 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3995 le16_to_cpu(tx_data_bd->nbytes));
3996 }
3997
3998 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3999
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004000 /* update with actual num BDs */
4001 first_bd->nbd = cpu_to_le16(nbd);
4002
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004003 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4004
4005 /* now send a tx doorbell, counting the next BD
4006 * if the packet contains or ends with it
4007 */
4008 if (TX_BD_POFF(bd_prod) < nbd)
4009 nbd++;
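	/* Worked example (a sketch of the accounting as read from the code
	 * above): a plain non-TSO skb with two page fragments ends up with
	 * nbd = 2 (start + parsing BD) + 2 (fragment BDs) = 4; the check
	 * above adds one more when the chain wraps onto the next BD page.
	 */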
4010
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004011 /* total_pkt_bytes should be set on the first data BD if
4012 * it's not an LSO packet and there is more than one
4013 * data BD. In this case pkt_size is limited by an MTU value.
4014 * However we prefer to set it for an LSO packet (while we don't
4015 * have to) in order to save some CPU cycles in the non-LSO
4016 * case, where we care much more about them.
4017 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004018 if (total_pkt_bd != NULL)
4019 total_pkt_bd->total_pkt_bytes = pkt_size;
4020
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004021 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004022 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00004023 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004024 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4025 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4026 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4027 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004028 if (pbd_e2)
4029 DP(NETIF_MSG_TX_QUEUED,
4030 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
Dmitry Kravkov91226792013-03-11 05:17:52 +00004031 pbd_e2,
4032 pbd_e2->data.mac_addr.dst_hi,
4033 pbd_e2->data.mac_addr.dst_mid,
4034 pbd_e2->data.mac_addr.dst_lo,
4035 pbd_e2->data.mac_addr.src_hi,
4036 pbd_e2->data.mac_addr.src_mid,
4037 pbd_e2->data.mac_addr.src_lo,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004038 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004039 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4040
Tom Herbert2df1a702011-11-28 16:33:37 +00004041 netdev_tx_sent_queue(txq, skb->len);
4042
Willem de Bruijn8373c572012-04-27 09:04:06 +00004043 skb_tx_timestamp(skb);
4044
Ariel Elior6383c0b2011-07-14 08:31:57 +00004045 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004046 /*
4047 * Make sure that the BD data is updated before updating the producer
4048 * since FW might read the BD right after the producer is updated.
4049 * This is only applicable for weak-ordered memory model archs such
4050 * as IA-64. The following barrier is also mandatory since the FW
4051 * assumes packets must have BDs.
4052 */
4053 wmb();
4054
Ariel Elior6383c0b2011-07-14 08:31:57 +00004055 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004056 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004057
Ariel Elior6383c0b2011-07-14 08:31:57 +00004058 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004059
4060 mmiowb();
4061
Ariel Elior6383c0b2011-07-14 08:31:57 +00004062 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004063
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004064 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004065 netif_tx_stop_queue(txq);
4066
4067 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4068 * ordering of set_bit() in netif_tx_stop_queue() and read of
4069 * fp->bd_tx_cons */
4070 smp_mb();
4071
Barak Witkowski15192a82012-06-19 07:48:28 +00004072 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004073 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004074 netif_tx_wake_queue(txq);
4075 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00004076 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004077
4078 return NETDEV_TX_OK;
4079}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004080
Ariel Elior6383c0b2011-07-14 08:31:57 +00004081/**
4082 * bnx2x_setup_tc - routine to configure net_device for multi tc
4083 *
4084 * @dev: net device to configure
4085 * @num_tc: number of traffic classes to enable
4086 *
4087 * callback connected to the ndo_setup_tc function pointer
4088 */
4089int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4090{
4091 int cos, prio, count, offset;
4092 struct bnx2x *bp = netdev_priv(dev);
4093
4094 /* setup tc must be called under rtnl lock */
4095 ASSERT_RTNL();
4096
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004097 /* no traffic classes requested - reset the tc mapping and return */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004098 if (!num_tc) {
4099 netdev_reset_tc(dev);
4100 return 0;
4101 }
4102
4103 /* requested to support too many traffic classes */
4104 if (num_tc > bp->max_cos) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00004105 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004106 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004107 return -EINVAL;
4108 }
4109
4110 /* declare amount of supported traffic classes */
4111 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004112 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004113 return -EINVAL;
4114 }
4115
4116 /* configure priority to traffic class mapping */
4117 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4118 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00004119 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4120 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004121 prio, bp->prio_to_cos[prio]);
4122 }
4123
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004124 /* Use this configuration to differentiate tc0 from other COSes
Ariel Elior6383c0b2011-07-14 08:31:57 +00004125 This can be used for ETS or PFC, and saves the effort of setting
4126 up a multi-class queueing discipline or negotiating DCBX with a switch
4127 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00004128 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004129 for (prio = 1; prio < 16; prio++) {
4130 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00004131 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004132 } */
4133
4134 /* configure traffic class to transmission queue mapping */
4135 for (cos = 0; cos < bp->max_cos; cos++) {
4136 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00004137 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004138 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00004139 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4140 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004141 cos, offset, count);
4142 }
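	/* Example with hypothetical numbers: 8 ETH queues and max_cos = 3
	 * would map tc0 to netdev queues 0-7, tc1 to 8-15 and tc2 to 16-23,
	 * assuming the ETH and non-CNIC queue counts are equal here.
	 */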
4143
4144 return 0;
4145}
4146
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004147/* called with rtnl_lock */
4148int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4149{
4150 struct sockaddr *addr = p;
4151 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004152 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004153
Merav Sicron51c1a582012-03-18 10:33:38 +00004154 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4155 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004156 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004157 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004158
Barak Witkowskia3348722012-04-23 03:04:46 +00004159 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4160 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004161 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004162 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004163 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004164
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004165 if (netif_running(dev)) {
4166 rc = bnx2x_set_eth_mac(bp, false);
4167 if (rc)
4168 return rc;
4169 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004170
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004171 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4172
4173 if (netif_running(dev))
4174 rc = bnx2x_set_eth_mac(bp, true);
4175
4176 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004177}
4178
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004179static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4180{
4181 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4182 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004183 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004184
4185 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00004186
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004187 if (IS_FCOE_IDX(fp_index)) {
4188 memset(sb, 0, sizeof(union host_hc_status_block));
4189 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004190 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004191 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004192 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004193 BNX2X_PCI_FREE(sb->e2_sb,
4194 bnx2x_fp(bp, fp_index,
4195 status_blk_mapping),
4196 sizeof(struct host_hc_status_block_e2));
4197 else
4198 BNX2X_PCI_FREE(sb->e1x_sb,
4199 bnx2x_fp(bp, fp_index,
4200 status_blk_mapping),
4201 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004202 }
Merav Sicron55c11942012-11-07 00:45:48 +00004203
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004204 /* Rx */
4205 if (!skip_rx_queue(bp, fp_index)) {
4206 bnx2x_free_rx_bds(fp);
4207
4208 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4209 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4210 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4211 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4212 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4213
4214 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4215 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4216 sizeof(struct eth_fast_path_rx_cqe) *
4217 NUM_RCQ_BD);
4218
4219 /* SGE ring */
4220 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4221 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4222 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4223 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4224 }
4225
4226 /* Tx */
4227 if (!skip_tx_queue(bp, fp_index)) {
4228 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004229 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004230 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004231
Merav Sicron51c1a582012-03-18 10:33:38 +00004232 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00004233 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004234 fp_index, cos, txdata->cid);
4235
4236 BNX2X_FREE(txdata->tx_buf_ring);
4237 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4238 txdata->tx_desc_mapping,
4239 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4240 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004241 }
4242 /* end of fastpath */
4243}
4244
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004245static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004246{
4247 int i;
4248 for_each_cnic_queue(bp, i)
4249 bnx2x_free_fp_mem_at(bp, i);
4250}
4251
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004252void bnx2x_free_fp_mem(struct bnx2x *bp)
4253{
4254 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00004255 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004256 bnx2x_free_fp_mem_at(bp, i);
4257}
4258
Eric Dumazet1191cb82012-04-27 21:39:21 +00004259static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004260{
4261 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004262 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004263 bnx2x_fp(bp, index, sb_index_values) =
4264 (__le16 *)status_blk.e2_sb->sb.index_values;
4265 bnx2x_fp(bp, index, sb_running_index) =
4266 (__le16 *)status_blk.e2_sb->sb.running_index;
4267 } else {
4268 bnx2x_fp(bp, index, sb_index_values) =
4269 (__le16 *)status_blk.e1x_sb->sb.index_values;
4270 bnx2x_fp(bp, index, sb_running_index) =
4271 (__le16 *)status_blk.e1x_sb->sb.running_index;
4272 }
4273}
4274
Eric Dumazet1191cb82012-04-27 21:39:21 +00004275/* Returns the number of actually allocated BDs */
4276static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4277 int rx_ring_size)
4278{
4279 struct bnx2x *bp = fp->bp;
4280 u16 ring_prod, cqe_ring_prod;
4281 int i, failure_cnt = 0;
4282
4283 fp->rx_comp_cons = 0;
4284 cqe_ring_prod = ring_prod = 0;
4285
4286 /* This routine is called only during fp init so
4287 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4288 */
4289 for (i = 0; i < rx_ring_size; i++) {
Michal Schmidt996dedb2013-09-05 22:13:09 +02004290 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
Eric Dumazet1191cb82012-04-27 21:39:21 +00004291 failure_cnt++;
4292 continue;
4293 }
4294 ring_prod = NEXT_RX_IDX(ring_prod);
4295 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4296 WARN_ON(ring_prod <= (i - failure_cnt));
4297 }
4298
4299 if (failure_cnt)
4300 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4301 i - failure_cnt, fp->index);
4302
4303 fp->rx_bd_prod = ring_prod;
4304 /* Limit the CQE producer by the CQE ring size */
4305 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4306 cqe_ring_prod);
4307 fp->rx_pkt = fp->rx_calls = 0;
4308
Barak Witkowski15192a82012-06-19 07:48:28 +00004309 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004310
4311 return i - failure_cnt;
4312}
4313
4314static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4315{
4316 int i;
4317
4318 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4319 struct eth_rx_cqe_next_page *nextpg;
4320
4321 nextpg = (struct eth_rx_cqe_next_page *)
4322 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4323 nextpg->addr_hi =
4324 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4325 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4326 nextpg->addr_lo =
4327 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4328 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4329 }
4330}
4331
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004332static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4333{
4334 union host_hc_status_block *sb;
4335 struct bnx2x_fastpath *fp = &bp->fp[index];
4336 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004337 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004338 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004339
Barak Witkowskia3348722012-04-23 03:04:46 +00004340 if (!bp->rx_ring_size &&
4341 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004342 rx_ring_size = MIN_RX_SIZE_NONTPA;
4343 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004344 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004345 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4346
Yuval Mintz065f8b92012-10-03 04:22:59 +00004347 if (CHIP_IS_E3(bp)) {
4348 u32 cfg = SHMEM_RD(bp,
4349 dev_info.port_hw_config[BP_PORT(bp)].
4350 default_cfg);
4351
4352 /* Decrease ring size for 1G functions */
4353 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4354 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4355 rx_ring_size /= 10;
4356 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004357
David S. Miller8decf862011-09-22 03:23:13 -04004358 /* allocate at least number of buffers required by FW */
4359 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4360 MIN_RX_SIZE_TPA, rx_ring_size);
4361
4362 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004363 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004364 rx_ring_size = bp->rx_ring_size;
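	/* To summarize the sizing above (descriptive note): storage-only
	 * functions get MIN_RX_SIZE_NONTPA, otherwise the default budget is
	 * split across the RX queues (cut by 10 for 1G SerDes E3 ports) and
	 * clamped to the FW minimum; an explicit bp->rx_ring_size wins.
	 */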
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004365
Yuval Mintz04c46732013-01-23 03:21:46 +00004366 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4367
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004368 /* Common */
4369 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004370
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004371 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004372 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004373 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004374 BNX2X_PCI_ALLOC(sb->e2_sb,
4375 &bnx2x_fp(bp, index, status_blk_mapping),
4376 sizeof(struct host_hc_status_block_e2));
4377 else
4378 BNX2X_PCI_ALLOC(sb->e1x_sb,
4379 &bnx2x_fp(bp, index, status_blk_mapping),
4380 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004381 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004382
4383 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4384 * set shortcuts for it.
4385 */
4386 if (!IS_FCOE_IDX(index))
4387 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004388
4389 /* Tx */
4390 if (!skip_tx_queue(bp, index)) {
4391 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004392 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004393 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004394
Merav Sicron51c1a582012-03-18 10:33:38 +00004395 DP(NETIF_MSG_IFUP,
4396 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004397 index, cos);
4398
4399 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004400 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004401 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4402 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004403 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004404 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004405 }
4406
4407 /* Rx */
4408 if (!skip_rx_queue(bp, index)) {
4409 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4410 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4411 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4412 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4413 &bnx2x_fp(bp, index, rx_desc_mapping),
4414 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4415
Dmitry Kravkov75b29452013-06-19 01:36:05 +03004416 /* Seed all CQEs by 1s */
4417 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4418 &bnx2x_fp(bp, index, rx_comp_mapping),
4419 sizeof(struct eth_fast_path_rx_cqe) *
4420 NUM_RCQ_BD);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004421
4422 /* SGE ring */
4423 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4424 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4425 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4426 &bnx2x_fp(bp, index, rx_sge_mapping),
4427 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4428 /* RX BD ring */
4429 bnx2x_set_next_page_rx_bd(fp);
4430
4431 /* CQ ring */
4432 bnx2x_set_next_page_rx_cq(fp);
4433
4434 /* BDs */
4435 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4436 if (ring_size < rx_ring_size)
4437 goto alloc_mem_err;
4438 }
4439
4440 return 0;
4441
4442/* handles low memory cases */
4443alloc_mem_err:
4444 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4445 index, ring_size);
4446 /* FW will drop all packets if the queue is not big enough;
4447 * in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004448 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004449 */
4450 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004451 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004452 /* release memory allocated for this queue */
4453 bnx2x_free_fp_mem_at(bp, index);
4454 return -ENOMEM;
4455 }
4456 return 0;
4457}
4458
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004459static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004460{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004461 if (!NO_FCOE(bp))
4462 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004463 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004464 /* we will fail load process instead of mark
4465 * NO_FCOE_FLAG
4466 */
4467 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004468
4469 return 0;
4470}
4471
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004472static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004473{
4474 int i;
4475
4476 /* 1. Allocate FP for leading - fatal if error
4477 * 2. Allocate RSS - fix number of queues if error
4478 */
4479
4480 /* leading */
4481 if (bnx2x_alloc_fp_mem_at(bp, 0))
4482 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004483
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004484 /* RSS */
4485 for_each_nondefault_eth_queue(bp, i)
4486 if (bnx2x_alloc_fp_mem_at(bp, i))
4487 break;
4488
4489 /* handle memory failures */
4490 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4491 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4492
4493 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004494 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004495 if (CNIC_SUPPORT(bp))
4496 /* move non eth FPs next to last eth FP
4497 * must be done in that order
4498 * FCOE_IDX < FWD_IDX < OOO_IDX
4499 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004500
Merav Sicron55c11942012-11-07 00:45:48 +00004501 /* move FCoE fp even if NO_FCOE_FLAG is on */
4502 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4503 bp->num_ethernet_queues -= delta;
4504 bp->num_queues = bp->num_ethernet_queues +
4505 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004506 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4507 bp->num_queues + delta, bp->num_queues);
4508 }
4509
4510 return 0;
4511}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004512
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004513void bnx2x_free_mem_bp(struct bnx2x *bp)
4514{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004515 int i;
4516
4517 for (i = 0; i < bp->fp_array_size; i++)
4518 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004519 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004520 kfree(bp->sp_objs);
4521 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004522 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004523 kfree(bp->msix_table);
4524 kfree(bp->ilt);
4525}
4526
Bill Pemberton0329aba2012-12-03 09:24:24 -05004527int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004528{
4529 struct bnx2x_fastpath *fp;
4530 struct msix_entry *tbl;
4531 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004532 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004533 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004534 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004535
Ariel Elior6383c0b2011-07-14 08:31:57 +00004536 /*
4537 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004538 * path IGU SBs plus the default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004539 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004540 msix_table_size = bp->igu_sb_cnt;
4541 if (IS_PF(bp))
4542 msix_table_size++;
4543 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004544
4545 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004546 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004547 bp->fp_array_size = fp_array_size;
4548 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004549
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004550 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004551 if (!fp)
4552 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004553 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004554 fp[i].tpa_info =
4555 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4556 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4557 if (!(fp[i].tpa_info))
4558 goto alloc_err;
4559 }
4560
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004561 bp->fp = fp;
4562
Barak Witkowski15192a82012-06-19 07:48:28 +00004563 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004564 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004565 GFP_KERNEL);
4566 if (!bp->sp_objs)
4567 goto alloc_err;
4568
4569 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004570 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004571 GFP_KERNEL);
4572 if (!bp->fp_stats)
4573 goto alloc_err;
4574
Merav Sicron65565882012-06-19 07:48:26 +00004575 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004576 txq_array_size =
4577 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4578 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4579
4580 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4581 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004582 if (!bp->bnx2x_txq)
4583 goto alloc_err;
4584
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004585 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004586 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004587 if (!tbl)
4588 goto alloc_err;
4589 bp->msix_table = tbl;
4590
4591 /* ilt */
4592 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4593 if (!ilt)
4594 goto alloc_err;
4595 bp->ilt = ilt;
4596
4597 return 0;
4598alloc_err:
4599 bnx2x_free_mem_bp(bp);
4600 return -ENOMEM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004601}
4602
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004603int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004604{
4605 struct bnx2x *bp = netdev_priv(dev);
4606
4607 if (unlikely(!netif_running(dev)))
4608 return 0;
4609
Yuval Mintz5d07d862012-09-13 02:56:21 +00004610 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004611 return bnx2x_nic_load(bp, LOAD_NORMAL);
4612}
4613
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004614int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4615{
4616 u32 sel_phy_idx = 0;
4617 if (bp->link_params.num_phys <= 1)
4618 return INT_PHY;
4619
4620 if (bp->link_vars.link_up) {
4621 sel_phy_idx = EXT_PHY1;
4622 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4623 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4624 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4625 sel_phy_idx = EXT_PHY2;
4626 } else {
4627
4628 switch (bnx2x_phy_selection(&bp->link_params)) {
4629 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4630 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4631 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4632 sel_phy_idx = EXT_PHY1;
4633 break;
4634 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4635 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4636 sel_phy_idx = EXT_PHY2;
4637 break;
4638 }
4639 }
4640
4641 return sel_phy_idx;
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004642}
4643int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4644{
4645 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4646 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004647 * The selected activated PHY index always refers to the state after
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004648 * swapping (in case PHY swapping is enabled), so when swapping is
4649 * enabled we need to reverse the configuration
4650 */
4651
4652 if (bp->link_params.multi_phy_config &
4653 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4654 if (sel_phy_idx == EXT_PHY1)
4655 sel_phy_idx = EXT_PHY2;
4656 else if (sel_phy_idx == EXT_PHY2)
4657 sel_phy_idx = EXT_PHY1;
4658 }
4659 return LINK_CONFIG_IDX(sel_phy_idx);
4660}
4661
Merav Sicron55c11942012-11-07 00:45:48 +00004662#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004663int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4664{
4665 struct bnx2x *bp = netdev_priv(dev);
4666 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4667
4668 switch (type) {
4669 case NETDEV_FCOE_WWNN:
4670 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4671 cp->fcoe_wwn_node_name_lo);
4672 break;
4673 case NETDEV_FCOE_WWPN:
4674 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4675 cp->fcoe_wwn_port_name_lo);
4676 break;
4677 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004678 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004679 return -EINVAL;
4680 }
4681
4682 return 0;
4683}
4684#endif
4685
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004686/* called with rtnl_lock */
4687int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4688{
4689 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004690
4691 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004692 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004693 return -EAGAIN;
4694 }
4695
4696 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004697 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4698 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004699 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004700 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004701
4702 /* This does not race with packet allocation
4703 * because the actual alloc size is
4704 * only updated as part of load
4705 */
4706 dev->mtu = new_mtu;
4707
Michał Mirosław66371c42011-04-12 09:38:23 +00004708 return bnx2x_reload_if_running(dev);
4709}
4710
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004711netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004712 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004713{
4714 struct bnx2x *bp = netdev_priv(dev);
4715
4716 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004717 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004718 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004719 features &= ~NETIF_F_GRO;
4720 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004721
4722 return features;
4723}
4724
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004725int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004726{
4727 struct bnx2x *bp = netdev_priv(dev);
4728 u32 flags = bp->flags;
Eric Dumazet8802f572013-05-18 07:14:53 +00004729 u32 changes;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004730 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00004731
4732 if (features & NETIF_F_LRO)
4733 flags |= TPA_ENABLE_FLAG;
4734 else
4735 flags &= ~TPA_ENABLE_FLAG;
4736
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004737 if (features & NETIF_F_GRO)
4738 flags |= GRO_ENABLE_FLAG;
4739 else
4740 flags &= ~GRO_ENABLE_FLAG;
4741
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004742 if (features & NETIF_F_LOOPBACK) {
4743 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4744 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4745 bnx2x_reload = true;
4746 }
4747 } else {
4748 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4749 bp->link_params.loopback_mode = LOOPBACK_NONE;
4750 bnx2x_reload = true;
4751 }
4752 }
4753
Eric Dumazet8802f572013-05-18 07:14:53 +00004754 changes = flags ^ bp->flags;
4755
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004756 /* if GRO is changed while LRO is enabled, don't force a reload */
Eric Dumazet8802f572013-05-18 07:14:53 +00004757 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4758 changes &= ~GRO_ENABLE_FLAG;
4759
4760 if (changes)
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004761 bnx2x_reload = true;
Eric Dumazet8802f572013-05-18 07:14:53 +00004762
4763 bp->flags = flags;
Michał Mirosław66371c42011-04-12 09:38:23 +00004764
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004765 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004766 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4767 return bnx2x_reload_if_running(dev);
4768 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004769 }
4770
Michał Mirosław66371c42011-04-12 09:38:23 +00004771 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004772}
4773
4774void bnx2x_tx_timeout(struct net_device *dev)
4775{
4776 struct bnx2x *bp = netdev_priv(dev);
4777
4778#ifdef BNX2X_STOP_ON_ERROR
4779 if (!bp->panic)
4780 bnx2x_panic();
4781#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004782
4783 smp_mb__before_clear_bit();
4784 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4785 smp_mb__after_clear_bit();
4786
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004787 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00004788 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004789}
4790
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004791int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4792{
4793 struct net_device *dev = pci_get_drvdata(pdev);
4794 struct bnx2x *bp;
4795
4796 if (!dev) {
4797 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4798 return -ENODEV;
4799 }
4800 bp = netdev_priv(dev);
4801
4802 rtnl_lock();
4803
4804 pci_save_state(pdev);
4805
4806 if (!netif_running(dev)) {
4807 rtnl_unlock();
4808 return 0;
4809 }
4810
4811 netif_device_detach(dev);
4812
Yuval Mintz5d07d862012-09-13 02:56:21 +00004813 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004814
4815 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4816
4817 rtnl_unlock();
4818
4819 return 0;
4820}
4821
4822int bnx2x_resume(struct pci_dev *pdev)
4823{
4824 struct net_device *dev = pci_get_drvdata(pdev);
4825 struct bnx2x *bp;
4826 int rc;
4827
4828 if (!dev) {
4829 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4830 return -ENODEV;
4831 }
4832 bp = netdev_priv(dev);
4833
4834 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004835 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004836 return -EAGAIN;
4837 }
4838
4839 rtnl_lock();
4840
4841 pci_restore_state(pdev);
4842
4843 if (!netif_running(dev)) {
4844 rtnl_unlock();
4845 return 0;
4846 }
4847
4848 bnx2x_set_power_state(bp, PCI_D0);
4849 netif_device_attach(dev);
4850
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004851 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4852
4853 rtnl_unlock();
4854
4855 return rc;
4856}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004857
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004858void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4859 u32 cid)
4860{
Ariel Eliorb9871bc2013-09-04 14:09:21 +03004861 if (!cxt) {
4862 BNX2X_ERR("bad context pointer %p\n", cxt);
4863 return;
4864 }
4865
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004866 /* ustorm cxt validation */
4867 cxt->ustorm_ag_context.cdu_usage =
4868 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4869 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4870 /* xcontext validation */
4871 cxt->xstorm_ag_context.cdu_reserved =
4872 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4873 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4874}
4875
Eric Dumazet1191cb82012-04-27 21:39:21 +00004876static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4877 u8 fw_sb_id, u8 sb_index,
4878 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004879{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004880 u32 addr = BAR_CSTRORM_INTMEM +
4881 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4882 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004883 DP(NETIF_MSG_IFUP,
4884 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4885 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004886}
4887
Eric Dumazet1191cb82012-04-27 21:39:21 +00004888static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4889 u16 fw_sb_id, u8 sb_index,
4890 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004891{
4892 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4893 u32 addr = BAR_CSTRORM_INTMEM +
4894 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004895 u8 flags = REG_RD8(bp, addr);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004896 /* clear and set */
4897 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4898 flags |= enable_flag;
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004899 REG_WR8(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004900 DP(NETIF_MSG_IFUP,
4901 "port %x fw_sb_id %d sb_index %d disable %d\n",
4902 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004903}
4904
4905void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4906 u8 sb_index, u8 disable, u16 usec)
4907{
4908 int port = BP_PORT(bp);
4909 u8 ticks = usec / BNX2X_BTR;
4910
4911 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4912
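	/* usec == 0 also disables the index - zero ticks would be
	 * meaningless (a descriptive note on the expression below).
	 */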
4913 disable = disable ? 1 : (usec ? 0 : 1);
4914 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4915}