/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

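/* Queue count selection, as implemented below: the bnx2x_num_queues knob
 * (set elsewhere in the driver) wins when non-zero; the GCC "x ? : y"
 * extension falls back to the kernel's default RSS queue count when it is
 * zero. A kdump kernel is forced down to a single queue to save memory,
 * and the result is clamped to what the device actually supports.
 */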
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

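/* Hypothetical usage sketch (not part of the driver): a caller such as an
 * ethtool get_drvinfo handler would hand in a fixed-size buffer, e.g.:
 *
 *	char fw[32];
 *
 *	bnx2x_fill_fw_str(bp, fw, sizeof(fw));
 *
 * On a PF, fw then holds the storm FW version followed by the bootcode
 * version and, when present, the external PHY FW version.
 */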
/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

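/* TX BD chain layout, as reconstructed by the walk below: each packet
 * starts with a start BD, followed by one parse BD (two when
 * BNX2X_HAS_SECOND_PBD is flagged), an optional TSO-split data BD that
 * shares the start BD's DMA mapping, and then one data BD per fragment.
 * nbd counts all of them; "next page" link entries in the ring are
 * stepped over by NEXT_TX_IDX().
 */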
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

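/* The SGE mask is a bit vector with one bit per SGE ring entry, grouped
 * into 64-bit elements (the BIT_VEC64_* helpers). A cleared bit marks a
 * page the chip has consumed; the producer is only advanced over whole
 * elements whose bits are all clear, which is why the loop below scans
 * element by element rather than page by page.
 */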
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

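/* TPA (transparent packet aggregation) per-queue state machine, as used
 * by the handlers below: bnx2x_tpa_start() moves a queue from STOP to
 * START when the first CQE of an aggregation arrives (or to ERROR if the
 * replacement buffer cannot be mapped), and bnx2x_tpa_stop() moves it
 * back to STOP on the END_AGG CQE, dropping the aggregation if the state
 * was ERROR.
 */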
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is one it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

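/* In LRO mode each SGE page becomes one skb fragment; in GRO mode a page
 * is sliced into gro_size-sized fragments (taking an extra page reference
 * for every slice after the first), so up to full_page/gro_size fragments
 * come out of each page, as implemented below.
 */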
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

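/* FW-aggregated packets are handed up as GSO packets; before
 * tcp_gro_complete() can mark them as such, th->check must hold the TCP
 * pseudo-header checksum over the full coalesced payload, which the two
 * helpers below recompute for IPv4 and IPv6 respectively.
 */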
#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

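/* End of an aggregation: a replacement buffer is allocated first so the
 * completed aggregation can be handed to the stack while a fresh buffer
 * takes its ring slot; if that allocation or the earlier TPA_START failed,
 * the packet is dropped and the existing buffer stays in the bin.
 */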
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

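/* Main RX completion loop: walk the completion queue until the budget is
 * exhausted, dispatching each CQE to the slowpath handler, the TPA
 * start/stop handlers, or the regular single-buffer receive path (which
 * copies small packets and reuses the buffer, or remaps a fresh buffer
 * for large ones), then publish the new producers to the chip.
 */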
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

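/* Worked example for the MF (multi-function) speed calculation below,
 * assuming a 10000 Mbps link and maxCfg = 25: in SI mode the function is
 * limited to a percentage, 10000 * 25 / 100 = 2500 Mbps; in SD mode
 * maxCfg is in units of 100 Mbps, so vn_max_rate = 25 * 100 = 2500 Mbps
 * and the lower of vn_max_rate and the physical line speed is reported.
 */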
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

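/* Set up the "next page" element at the end of each SGE ring page so it
 * points at the following page (wrapping to the first), chaining the
 * pages into a circular SGE ring the chip can follow.
 */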
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

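/* Release up to @last TPA aggregation buffers of a queue; buffers still
 * in BNX2X_TPA_START state are DMA-unmapped before being freed.
 */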
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

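/* For each ETH queue: fill the TPA aggregation pool and populate the
 * SGE ring (disabling TPA on the queue if either allocation fails),
 * then activate every BD ring and, on E1 chips, program the Rx
 * completion mapping workaround registers for queue 0.
 */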
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

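/* Walk each CoS Tx ring of the fastpath from consumer to producer,
 * freeing every pending packet, then reset the BQL state of the
 * corresponding netdev Tx queue.
 */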
static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

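/* Unmap and free every Rx buffer still sitting in the queue's BD ring;
 * a no-op if the ring was never allocated.
 */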
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

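/* If @value differs from the current MAX bandwidth setting, rewrite the
 * MAX_BW field of this function's MF configuration and notify the MCP
 * with a SET_MF_BW firmware command.
 */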
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

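/* Build the MSI-X table - vector 0 is the slowpath SB (PF only),
 * optionally one vector for CNIC, then one per ETH queue - and enable
 * MSI-X. If fewer vectors are granted, the number of ETH queues is
 * shrunk to match; as a last resort a single shared vector is used.
 */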
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many vectors are we short? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

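/* Request the IRQs laid out by bnx2x_enable_msix(): the slowpath vector
 * first (PF only), then skip the slot reserved for CNIC, then one IRQ
 * per ETH fastpath. On failure every vector requested so far is freed.
 */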
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

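/* Request the single interrupt used in the non-multi-vector modes:
 * the lone MSI-X vector, the MSI interrupt, or a shared legacy INTx.
 */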
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}

static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}

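/* Steer FCoE and FIP frames (looking past any VLAN tag) to the
 * dedicated FCoE Tx ring; everything else is hashed by the stock
 * fallback across the ETH queues.
 */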
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}

void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}

/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}

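/* Per-queue Rx buffer size is derived as:
 *
 *   rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
 *                 ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END
 *
 * where mtu is the device MTU, or a mini-jumbo MTU for the FCoE ring.
 * When the buffer plus NET_SKB_PAD still fits in one page, rx_frag_size
 * enables the page-frag allocator in bnx2x_frag_alloc(); a value of 0
 * makes it fall back to kmalloc.
 */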
static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive
			 * on this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun attack.
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
		else
			fp->rx_frag_size = 0;
	}
}

static int bnx2x_init_rss(struct bnx2x *bp)
{
	int i;
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents for the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}

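/* Build a ramrod that enables (or disables) regular RSS mode with
 * IPv4/IPv6 TCP - and optionally UDP and inner-GRE - hashing, copies in
 * the indirection table and, when requested, a fresh random hash key,
 * then submits it either directly (PF) or through the VF-PF channel.
 */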
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable)
{
	struct bnx2x_config_rss_params params = {NULL};

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *	bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	if (enable) {
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
		if (rss_obj->udp_rss_v4)
			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
		if (rss_obj->udp_rss_v6)
			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);

		if (!CHIP_IS_E1x(bp))
			/* valid only for TUNN_MODE_GRE tunnel mode */
			__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
	} else {
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
	}

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	if (IS_PF(bp))
		return bnx2x_config_rss(bp, &params);
	else
		return bnx2x_vfpf_config_rss(bp, &params);
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}

/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested commands */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... - Since we're doing a driver cleanup only,
	 * we take a lock surrounding both the initial send and the CONTs,
	 * as we don't want a true completion to disrupt us in the middle.
	 */
	netif_addr_lock_bh(bp->dev);
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			netif_addr_unlock_bh(bp->dev);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
	netif_addr_unlock_bh(bp->dev);
}

#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		goto label; \
	} while (0)
#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/

static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
}

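/* One DMA-coherent block holds both the FW statistics request
 * (header plus command groups) and the reply data:
 *
 *   fw_stats_mapping -> [ request, fw_stats_req_sz bytes  ]
 *                       [ data,    fw_stats_data_sz bytes ]
 *
 * The shortcut pointers set below are carved out of this block.
 */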
static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
	 * and fcoe l2 queue) stats + num of queues (which includes another 1
	 * for fcoe l2 queue if applicable)
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		 1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);
	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	if (!bp->fw_stats)
		goto alloc_mem_err;

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}

/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{
	u32 param;

	/* init fw_seq */
	bp->fw_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		 DRV_MSG_SEQ_NUMBER_MASK);
	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

	/* Get current FW pulse sequence */
	bp->fw_drv_pulse_wr_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
		 DRV_PULSE_SEQ_MASK);
	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;

	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;

	/* load request */
	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);

	/* if mcp fails to respond we must abort */
	if (!(*load_code)) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	/* If mcp refused (e.g. other port is in diagnostic mode) we
	 * must abort
	 */
	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
		BNX2X_ERR("MCP refused load request, aborting\n");
		return -EBUSY;
	}
	return 0;
}

/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a pf from another VM may have already
 * initialized the device including loading FW
 */
int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
	/* is another pf loaded on this engine? */
	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
		/* build my FW version dword */
		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
			(BCM_5710_FW_MINOR_VERSION << 8) +
			(BCM_5710_FW_REVISION_VERSION << 16) +
			(BCM_5710_FW_ENGINEERING_VERSION << 24);

		/* read loaded FW from chip */
		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			if (print_err)
				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
					  loaded_fw, my_fw);
			else
				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
					       loaded_fw, my_fw);
			return -EBUSY;
		}
	}
	return 0;
}

/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{
	int path = BP_PATH(bp);

	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	bnx2x_load_count[path][0]++;
	bnx2x_load_count[path][1 + port]++;
	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	if (bnx2x_load_count[path][0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (bnx2x_load_count[path][1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}

/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
{
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/* We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
}

static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
{
	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;
}

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int cos;
	struct napi_struct orig_napi = fp->napi;
	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;

	/* bzero bnx2x_fastpath contents */
	if (fp->tpa_info)
		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
		       sizeof(struct bnx2x_agg_info));
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
	fp->tpa_info = orig_tpa_info;
	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/* Init txdata pointers */
	if (IS_FCOE_FP(fp))
		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
	if (IS_ETH_FP(fp))
		for_each_cos_in_tx_queue(fp, cos)
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
				BNX2X_NUM_ETH_QUEUES(bp) + index];

	/* set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
			    (bp->flags & GRO_ENABLE_FLAG &&
			     bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
}

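/* Bring up the CNIC (iSCSI/FCoE offload) side of the device: allocate
 * its memory and fastpath resources, add and enable its NAPI contexts,
 * run the CNIC portion of HW init, enable the timer scan and set up the
 * CNIC queues (PF only), then refresh the Rx filter and iSCSI info.
 */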
2493int bnx2x_load_cnic(struct bnx2x *bp)
2494{
2495 int i, rc, port = BP_PORT(bp);
2496
2497 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2498
2499 mutex_init(&bp->cnic_mutex);
2500
Ariel Eliorad5afc82013-01-01 05:22:26 +00002501 if (IS_PF(bp)) {
2502 rc = bnx2x_alloc_mem_cnic(bp);
2503 if (rc) {
2504 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2505 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2506 }
Merav Sicron55c11942012-11-07 00:45:48 +00002507 }
2508
2509 rc = bnx2x_alloc_fp_mem_cnic(bp);
2510 if (rc) {
2511 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2512 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2513 }
2514
2515 /* Update the number of queues with the cnic queues */
2516 rc = bnx2x_set_real_num_queues(bp, 1);
2517 if (rc) {
2518 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2519 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2520 }
2521
2522 /* Add all CNIC NAPI objects */
2523 bnx2x_add_all_napi_cnic(bp);
2524 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2525 bnx2x_napi_enable_cnic(bp);
2526
2527 rc = bnx2x_init_hw_func_cnic(bp);
2528 if (rc)
2529 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2530
2531 bnx2x_nic_init_cnic(bp);
2532
Ariel Eliorad5afc82013-01-01 05:22:26 +00002533 if (IS_PF(bp)) {
2534 /* Enable Timer scan */
2535 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002536
Ariel Eliorad5afc82013-01-01 05:22:26 +00002537 /* setup cnic queues */
2538 for_each_cnic_queue(bp, i) {
2539 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2540 if (rc) {
2541 BNX2X_ERR("Queue setup failed\n");
2542 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2543 }
Merav Sicron55c11942012-11-07 00:45:48 +00002544 }
2545 }
2546
2547 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002548 bnx2x_set_rx_mode_inner(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002549
2550 /* re-read iscsi info */
2551 bnx2x_get_iscsi_info(bp);
2552 bnx2x_setup_cnic_irq_info(bp);
2553 bnx2x_setup_cnic_info(bp);
2554 bp->cnic_loaded = true;
2555 if (bp->state == BNX2X_STATE_OPEN)
2556 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2557
Merav Sicron55c11942012-11-07 00:45:48 +00002558	DP(NETIF_MSG_IFUP, "CNIC-related load finished successfully\n");
2559
2560 return 0;
2561
2562#ifndef BNX2X_STOP_ON_ERROR
2563load_error_cnic2:
2564 /* Disable Timer scan */
2565 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2566
2567load_error_cnic1:
2568 bnx2x_napi_disable_cnic(bp);
2569 /* Update the number of queues without the cnic queues */
Yuval Mintzd9d81862013-09-23 10:12:53 +03002570 if (bnx2x_set_real_num_queues(bp, 0))
Merav Sicron55c11942012-11-07 00:45:48 +00002571		BNX2X_ERR("Unable to set real_num_queues without cnic\n");
2572load_error_cnic0:
2573 BNX2X_ERR("CNIC-related load failed\n");
2574 bnx2x_free_fp_mem_cnic(bp);
2575 bnx2x_free_mem_cnic(bp);
2576 return rc;
2577#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002578}
2579
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002580/* must be called with rtnl_lock */
2581int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2582{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002583 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002584 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002585
Merav Sicron55c11942012-11-07 00:45:48 +00002586 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2587 DP(NETIF_MSG_IFUP,
2588 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2589
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002590#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002591 if (unlikely(bp->panic)) {
2592 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002593 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002594 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002595#endif
2596
2597 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2598
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002599 /* zero the structure w/o any lock, before SP handler is initialized */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002600 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2601 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2602 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002603
Ariel Eliorad5afc82013-01-01 05:22:26 +00002604 if (IS_PF(bp))
2605 /* must be called before memory allocation and HW init */
2606 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002607
Ariel Elior6383c0b2011-07-14 08:31:57 +00002608 /*
2609	 * Zero fastpath structures, preserving invariants such as the napi
2610	 * object (allocated only once), the fp index, max_cos and bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002611 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002612 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002613	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002614 for_each_queue(bp, i)
2615 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002616 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2617 bp->num_cnic_queues) *
2618 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002619
Merav Sicron55c11942012-11-07 00:45:48 +00002620 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002621
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002622 /* Set the receive queues buffer size */
2623 bnx2x_set_rx_buf_size(bp);
2624
Ariel Eliorad5afc82013-01-01 05:22:26 +00002625 if (IS_PF(bp)) {
2626 rc = bnx2x_alloc_mem(bp);
2627 if (rc) {
2628 BNX2X_ERR("Unable to allocate bp memory\n");
2629 return rc;
2630 }
2631 }
2632
Ariel Eliorad5afc82013-01-01 05:22:26 +00002633	/* Needs to be done after alloc mem, since it self-adjusts to the
2634	 * amount of memory available for RSS queues.
2635	 */
2636 rc = bnx2x_alloc_fp_mem(bp);
2637 if (rc) {
2638 BNX2X_ERR("Unable to allocate memory for fps\n");
2639 LOAD_ERROR_EXIT(bp, load_error0);
2640 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002641
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002642	/* Allocate memory for FW statistics */
2643 if (bnx2x_alloc_fw_stats_mem(bp))
2644 LOAD_ERROR_EXIT(bp, load_error0);
2645
Ariel Elior8d9ac292013-01-01 05:22:27 +00002646 /* request pf to initialize status blocks */
2647 if (IS_VF(bp)) {
2648 rc = bnx2x_vfpf_init(bp);
2649 if (rc)
2650 LOAD_ERROR_EXIT(bp, load_error0);
2651 }
2652
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002653	/* Since bnx2x_alloc_mem() may update
2654	 * bp->num_queues, bnx2x_set_real_num_queues() must always
Merav Sicron55c11942012-11-07 00:45:48 +00002655	 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002656 */
Merav Sicron55c11942012-11-07 00:45:48 +00002657 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002658 if (rc) {
2659 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002660 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002661 }
2662
Ariel Elior6383c0b2011-07-14 08:31:57 +00002663	/* Configure multi-CoS mappings in the kernel.
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002664	 * This configuration may be overridden by a multi-class queue
2665	 * discipline or by a DCBX negotiation result.
Ariel Elior6383c0b2011-07-14 08:31:57 +00002666 */
2667 bnx2x_setup_tc(bp->dev, bp->max_cos);
2668
Merav Sicron26614ba2012-08-27 03:26:19 +00002669 /* Add all NAPI objects */
2670 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002671 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002672 bnx2x_napi_enable(bp);
2673
Ariel Eliorad5afc82013-01-01 05:22:26 +00002674 if (IS_PF(bp)) {
2675 /* set pf load just before approaching the MCP */
2676 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002677
Ariel Eliorad5afc82013-01-01 05:22:26 +00002678 /* if mcp exists send load request and analyze response */
2679 if (!BP_NOMCP(bp)) {
2680 /* attempt to load pf */
2681 rc = bnx2x_nic_load_request(bp, &load_code);
2682 if (rc)
2683 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002684
Ariel Eliorad5afc82013-01-01 05:22:26 +00002685 /* what did mcp say? */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002686 rc = bnx2x_compare_fw_ver(bp, load_code, true);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002687 if (rc) {
2688 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002689 LOAD_ERROR_EXIT(bp, load_error2);
2690 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002691 } else {
2692 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002693 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002694
Ariel Eliorad5afc82013-01-01 05:22:26 +00002695 /* mark pmf if applicable */
2696 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002697
Ariel Eliorad5afc82013-01-01 05:22:26 +00002698 /* Init Function state controlling object */
2699 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002700
Ariel Eliorad5afc82013-01-01 05:22:26 +00002701 /* Initialize HW */
2702 rc = bnx2x_init_hw(bp, load_code);
2703 if (rc) {
2704 BNX2X_ERR("HW init failed, aborting\n");
2705 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2706 LOAD_ERROR_EXIT(bp, load_error2);
2707 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002708 }
2709
Yuval Mintzecf01c22013-04-22 02:53:03 +00002710 bnx2x_pre_irq_nic_init(bp);
2711
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002712 /* Connect to IRQs */
2713 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002714 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002715 BNX2X_ERR("setup irqs failed\n");
2716 if (IS_PF(bp))
2717 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002718 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002719 }
2720
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002721 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002722 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002723 /* Setup NIC internals and enable interrupts */
2724 bnx2x_post_irq_nic_init(bp, load_code);
2725
Ariel Eliorad5afc82013-01-01 05:22:26 +00002726 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002727 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002728
Ariel Eliorad5afc82013-01-01 05:22:26 +00002729 /* Set AFEX default VLAN tag to an invalid value */
2730 bp->afex_def_vlan_tag = -1;
2731 bnx2x_nic_load_afex_dcc(bp, load_code);
2732 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2733 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002734 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002735 BNX2X_ERR("Function start failed!\n");
2736 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2737
Merav Sicron55c11942012-11-07 00:45:48 +00002738 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002739 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002740
Ariel Eliorad5afc82013-01-01 05:22:26 +00002741 /* Send LOAD_DONE command to MCP */
2742 if (!BP_NOMCP(bp)) {
2743 load_code = bnx2x_fw_command(bp,
2744 DRV_MSG_CODE_LOAD_DONE, 0);
2745 if (!load_code) {
2746 BNX2X_ERR("MCP response failure, aborting\n");
2747 rc = -EBUSY;
2748 LOAD_ERROR_EXIT(bp, load_error3);
2749 }
2750 }
2751
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002752 /* initialize FW coalescing state machines in RAM */
2753 bnx2x_update_coalesce(bp);
Ariel Elior60cad4e2013-09-04 14:09:22 +03002754 }
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002755
Ariel Elior60cad4e2013-09-04 14:09:22 +03002756 /* setup the leading queue */
2757 rc = bnx2x_setup_leading(bp);
2758 if (rc) {
2759 BNX2X_ERR("Setup leading failed!\n");
2760 LOAD_ERROR_EXIT(bp, load_error3);
2761 }
2762
2763 /* set up the rest of the queues */
2764 for_each_nondefault_eth_queue(bp, i) {
2765 if (IS_PF(bp))
2766 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2767 else /* VF */
2768 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002769 if (rc) {
Ariel Elior60cad4e2013-09-04 14:09:22 +03002770 BNX2X_ERR("Queue %d setup failed\n", i);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002771 LOAD_ERROR_EXIT(bp, load_error3);
2772 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002773 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002774
Ariel Elior60cad4e2013-09-04 14:09:22 +03002775 /* setup rss */
2776 rc = bnx2x_init_rss(bp);
2777 if (rc) {
2778 BNX2X_ERR("PF RSS init failed\n");
2779 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002780 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002781
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002782 /* Now when Clients are configured we are ready to work */
2783 bp->state = BNX2X_STATE_OPEN;
2784
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002785 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002786 if (IS_PF(bp))
2787 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002788 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002789 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2790 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002791 if (rc) {
2792 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002793 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002794 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002795
Ariel Eliorad5afc82013-01-01 05:22:26 +00002796 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002797 bnx2x_update_max_mf_config(bp, bp->pending_max);
2798 bp->pending_max = 0;
2799 }
2800
Ariel Eliorad5afc82013-01-01 05:22:26 +00002801 if (bp->port.pmf) {
2802 rc = bnx2x_initial_phy_init(bp, load_mode);
2803 if (rc)
2804 LOAD_ERROR_EXIT(bp, load_error3);
2805 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002806 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002807
2808 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002809
2810 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002811 bnx2x_set_rx_mode_inner(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002812
Michal Kalderoneeed0182014-08-17 16:47:44 +03002813 if (bp->flags & PTP_SUPPORTED) {
2814 bnx2x_init_ptp(bp);
2815 bnx2x_configure_ptp_filters(bp);
2816 }
2817 /* Start Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002818 switch (load_mode) {
2819 case LOAD_NORMAL:
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002820 /* Tx queue should be only re-enabled */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002821 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002822 break;
2823
2824 case LOAD_OPEN:
2825 netif_tx_start_all_queues(bp->dev);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002826 smp_mb__after_atomic();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002827 break;
2828
2829 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002830 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002831 bp->state = BNX2X_STATE_DIAG;
2832 break;
2833
2834 default:
2835 break;
2836 }
2837
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002838 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002839 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002840 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002841 bnx2x__link_status_update(bp);
2842
2843 /* start the timer */
2844 mod_timer(&bp->timer, jiffies + bp->current_interval);
2845
Merav Sicron55c11942012-11-07 00:45:48 +00002846 if (CNIC_ENABLED(bp))
2847 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002848
Yuval Mintz42f82772014-03-23 18:12:23 +02002849 if (IS_PF(bp))
2850 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2851
Ariel Eliorad5afc82013-01-01 05:22:26 +00002852 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2853 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002854 u32 val;
2855 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2856 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2857 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2858 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2859 }
2860
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002861 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002862 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002863 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002864 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002865 return -EBUSY;
2866 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002867
Barak Witkowski98768792012-06-19 07:48:31 +00002868 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2869 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2870 bnx2x_dcbx_init(bp, false);
2871
Merav Sicron55c11942012-11-07 00:45:48 +00002872	DP(NETIF_MSG_IFUP, "NIC load finished successfully\n");
2873
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002874 return 0;
2875
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002876#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002877load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002878 if (IS_PF(bp)) {
2879 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002880
Ariel Eliorad5afc82013-01-01 05:22:26 +00002881 /* Clean queueable objects */
2882 bnx2x_squeeze_objects(bp);
2883 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002884
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002885 /* Free SKBs, SGEs, TPA pool and driver internals */
2886 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002887 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002888 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002889
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002890 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002891 bnx2x_free_irq(bp);
2892load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002893 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002894 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2895 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2896 }
2897
2898 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002899load_error1:
2900 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002901 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002902
Ariel Elior889b9af2012-01-26 06:01:51 +00002903 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002904 if (IS_PF(bp))
2905 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002906load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002907 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002908 bnx2x_free_fp_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002909 bnx2x_free_mem(bp);
2910
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002911 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002912#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002913}
2914
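/**
 * bnx2x_drain_tx_queues - wait until the Tx fastpath tasks complete
 *
 * @bp:		driver handle
 *
 * Returns 0 once every CoS ring of every Tx queue has been drained,
 * or a bnx2x_clean_tx_queue() error otherwise.
 */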
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002915int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002916{
2917 u8 rc = 0, cos, i;
2918
2919 /* Wait until tx fastpath tasks complete */
2920 for_each_tx_queue(bp, i) {
2921 struct bnx2x_fastpath *fp = &bp->fp[i];
2922
2923 for_each_cos_in_tx_queue(fp, cos)
2924 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2925 if (rc)
2926 return rc;
2927 }
2928 return 0;
2929}
2930
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002931/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002932int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002933{
2934 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002935 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002936
Merav Sicron55c11942012-11-07 00:45:48 +00002937 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2938
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002939 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002940 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002941 u32 val;
2942 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2943 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2944 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2945 }
2946
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002947 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002948 (bp->state == BNX2X_STATE_CLOSED ||
2949 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002950 /* We can get here if the driver has been unloaded
2951 * during parity error recovery and is either waiting for a
2952 * leader to complete or for other functions to unload and
2953 * then ifdown has been issued. In this case we want to
2954 * unload and let other functions to complete a recovery
2955 * process.
2956 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002957 bp->recovery_state = BNX2X_RECOVERY_DONE;
2958 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002959 bnx2x_release_leader_lock(bp);
2960 smp_mb();
2961
Merav Sicron51c1a582012-03-18 10:33:38 +00002962		DP(NETIF_MSG_IFDOWN, "Releasing leadership...\n");
2963 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002964 return -EINVAL;
2965 }
2966
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002967	/* Nothing to do during unload if the previous bnx2x_nic_load()
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002968	 * has not completed successfully - all resources are released.
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002969	 *
2970	 * We can get here only after an unsuccessful ndo_* callback, during
2971	 * which the dev->IFF_UP flag is still on.
2972 */
2973 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2974 return 0;
2975
2976	/* It's important to set bp->state to a value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002977 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2978 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2979 */
2980 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2981 smp_mb();
2982
Ariel Elior78c3bcc2013-06-20 17:39:08 +03002983 /* indicate to VFs that the PF is going down */
2984 bnx2x_iov_channel_down(bp);
2985
Merav Sicron55c11942012-11-07 00:45:48 +00002986 if (CNIC_LOADED(bp))
2987 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2988
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002989 /* Stop Tx */
2990 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002991 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002992
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002993 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002994
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002995 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002996
Ariel Eliorad5afc82013-01-01 05:22:26 +00002997 if (IS_PF(bp)) {
2998 /* Set ALWAYS_ALIVE bit in shmem */
2999 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3000 bnx2x_drv_pulse(bp);
3001 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3002 bnx2x_save_statistics(bp);
3003 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003004
Ariel Eliorad5afc82013-01-01 05:22:26 +00003005 /* wait till consumers catch up with producers in all queues */
3006 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003007
Ariel Elior9b176b62013-01-01 05:22:28 +00003008	/* if VF, indicate to the PF that this function is going down (the PF
3009	 * will delete sp elements and clear initializations)
3010	 */
3011 if (IS_VF(bp))
3012 bnx2x_vfpf_close_vf(bp);
3013 else if (unload_mode != UNLOAD_RECOVERY)
3014 /* if this is a normal/close unload need to clean up chip*/
Yuval Mintz5d07d862012-09-13 02:56:21 +00003015 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003016 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003017 /* Send the UNLOAD_REQUEST to the MCP */
3018 bnx2x_send_unload_req(bp, unload_mode);
3019
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003020		/* In case of a global attention, prevent transactions to the
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003021		 * host from the functions on the engine that doesn't reset
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003022		 * global blocks, once global blocks are reset and gates are
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003023		 * opened (the engine on which the leader will perform the
3024		 * recovery last).
3025 */
3026 if (!CHIP_IS_E1x(bp))
3027 bnx2x_pf_disable(bp);
3028
3029 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003030 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00003031 /* Delete all NAPI objects */
3032 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003033 if (CNIC_LOADED(bp))
3034 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003035 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003036 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003037
3038 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00003039 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003040 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003041
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003042 /*
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003043 * At this stage no more interrupts will arrive so we may safely clean
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003044 * the queueable objects here in case they failed to get cleaned so far.
3045 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003046 if (IS_PF(bp))
3047 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003048
Vladislav Zolotarov79616892011-07-21 07:58:54 +00003049 /* There should be no more pending SP commands at this stage */
3050 bp->sp_state = 0;
3051
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003052 bp->port.pmf = 0;
3053
Dmitry Kravkova0d307b2013-11-17 08:59:26 +02003054 /* clear pending work in rtnl task */
3055 bp->sp_rtnl_state = 0;
3056 smp_mb();
3057
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003058 /* Free SKBs, SGEs, TPA pool and driver internals */
3059 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003060 if (CNIC_LOADED(bp))
3061 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003062 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003063 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003064
Ariel Eliorad5afc82013-01-01 05:22:26 +00003065 bnx2x_free_fp_mem(bp);
3066 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00003067 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003068
Ariel Eliorad5afc82013-01-01 05:22:26 +00003069 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00003070 if (CNIC_LOADED(bp))
3071 bnx2x_free_mem_cnic(bp);
3072 }
Ariel Eliorb4cddbd2013-08-28 01:13:03 +03003073 bnx2x_free_mem(bp);
3074
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003075 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00003076 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003077
Yuval Mintz42f82772014-03-23 18:12:23 +02003078 /* Clear driver version indication in shmem */
3079 if (IS_PF(bp))
3080 bnx2x_update_mng_version(bp);
3081
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003082 /* Check if there are pending parity attentions. If there are - set
3083 * RECOVERY_IN_PROGRESS.
3084 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003085 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003086 bnx2x_set_reset_in_progress(bp);
3087
3088 /* Set RESET_IS_GLOBAL if needed */
3089 if (global)
3090 bnx2x_set_reset_global(bp);
3091 }
3092
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003093 /* The last driver must disable a "close the gate" if there is no
3094 * parity attention or "process kill" pending.
3095 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003096 if (IS_PF(bp) &&
3097 !bnx2x_clear_pf_load(bp) &&
3098 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003099 bnx2x_disable_close_the_gate(bp);
3100
Merav Sicron55c11942012-11-07 00:45:48 +00003101 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3102
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003103 return 0;
3104}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003105
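/**
 * bnx2x_set_power_state - move the device between PCI power states
 *
 * @bp:		driver handle
 * @state:	PCI_D0 or PCI_D3hot
 *
 * Silently succeeds if the device exposes no PM capability, and keeps
 * the power up in D3hot while other functions still use the device.
 */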
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003106int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3107{
3108 u16 pmcsr;
3109
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003110 /* If there is no power capability, silently succeed */
Jon Mason29ed74c2013-09-11 11:22:39 -07003111 if (!bp->pdev->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003112 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003113 return 0;
3114 }
3115
Jon Mason29ed74c2013-09-11 11:22:39 -07003116 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003117
3118 switch (state) {
3119 case PCI_D0:
Jon Mason29ed74c2013-09-11 11:22:39 -07003120 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003121 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3122 PCI_PM_CTRL_PME_STATUS));
3123
3124 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3125 /* delay required during transition out of D3hot */
3126 msleep(20);
3127 break;
3128
3129 case PCI_D3hot:
3130		/* If there are other clients above, don't
3131		   shut down the power */
3132 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3133 return 0;
3134 /* Don't shut down the power for emulation and FPGA */
3135 if (CHIP_REV_IS_SLOW(bp))
3136 return 0;
3137
3138 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
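		/* 3 selects the D3hot state in the PM control state field */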
3139 pmcsr |= 3;
3140
3141 if (bp->wol)
3142 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3143
Jon Mason29ed74c2013-09-11 11:22:39 -07003144 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003145 pmcsr);
3146
3147 /* No more memory access after this point until
3148 * device is brought back to D0.
3149 */
3150 break;
3151
3152 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003153 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003154 return -EINVAL;
3155 }
3156 return 0;
3157}
3158
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003159/*
3160 * net_device service functions
3161 */
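/* NAPI poll callback: service Tx completions on every CoS ring, then up
 * to @budget Rx packets, and re-enable interrupts only once both rings
 * are idle and the status block indices have been re-read.
 */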
stephen hemmingera8f47eb2014-01-09 22:20:11 -08003162static int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003163{
3164 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003165 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003166 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3167 napi);
3168 struct bnx2x *bp = fp->bp;
3169
3170 while (1) {
3171#ifdef BNX2X_STOP_ON_ERROR
3172 if (unlikely(bp->panic)) {
3173 napi_complete(napi);
3174 return 0;
3175 }
3176#endif
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003177 if (!bnx2x_fp_lock_napi(fp))
3178 return work_done;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003179
Ariel Elior6383c0b2011-07-14 08:31:57 +00003180 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003181 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3182 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003183
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003184 if (bnx2x_has_rx_work(fp)) {
3185 work_done += bnx2x_rx_int(fp, budget - work_done);
3186
3187 /* must not complete if we consumed full budget */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003188 if (work_done >= budget) {
3189 bnx2x_fp_unlock_napi(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003190 break;
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003191 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003192 }
3193
3194 /* Fall out from the NAPI loop if needed */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003195 if (!bnx2x_fp_unlock_napi(fp) &&
3196 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003197
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003198 /* No need to update SB for FCoE L2 ring as long as
3199 * it's connected to the default SB and the SB
3200 * has been updated when NAPI was scheduled.
3201 */
3202 if (IS_FCOE_FP(fp)) {
3203 napi_complete(napi);
3204 break;
3205 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003206 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003207 /* bnx2x_has_rx_work() reads the status block,
3208 * thus we need to ensure that status block indices
3209 * have been actually read (bnx2x_update_fpsb_idx)
3210 * prior to this check (bnx2x_has_rx_work) so that
3211 * we won't write the "newer" value of the status block
3212 * to IGU (if there was a DMA right after
3213 * bnx2x_has_rx_work and if there is no rmb, the memory
3214 * reading (bnx2x_update_fpsb_idx) may be postponed
3215 * to right before bnx2x_ack_sb). In this case there
3216 * will never be another interrupt until there is
3217 * another update of the status block, while there
3218 * is still unhandled work.
3219 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003220 rmb();
3221
3222 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3223 napi_complete(napi);
3224 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003225 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003226 "Update index to %d\n", fp->fp_hc_idx);
3227 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3228 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003229 IGU_INT_ENABLE, 1);
3230 break;
3231 }
3232 }
3233 }
3234
3235 return work_done;
3236}
3237
Cong Wange0d10952013-08-01 11:10:25 +08003238#ifdef CONFIG_NET_RX_BUSY_POLL
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003239/* must be called with local_bh_disable()d */
3240int bnx2x_low_latency_recv(struct napi_struct *napi)
3241{
3242 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3243 napi);
3244 struct bnx2x *bp = fp->bp;
3245 int found = 0;
3246
3247 if ((bp->state == BNX2X_STATE_CLOSED) ||
3248 (bp->state == BNX2X_STATE_ERROR) ||
3249 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3250 return LL_FLUSH_FAILED;
3251
3252 if (!bnx2x_fp_lock_poll(fp))
3253 return LL_FLUSH_BUSY;
3254
Dmitry Kravkov75b29452013-06-19 01:36:05 +03003255 if (bnx2x_has_rx_work(fp))
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003256 found = bnx2x_rx_int(fp, 4);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003257
3258 bnx2x_fp_unlock_poll(fp);
3259
3260 return found;
3261}
3262#endif
3263
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003264/* We split the first BD into header and data BDs
3265 * to ease the pain of our fellow microcode engineers;
3266 * we use one mapping for both BDs.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003267 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003268static u16 bnx2x_tx_split(struct bnx2x *bp,
3269 struct bnx2x_fp_txdata *txdata,
3270 struct sw_tx_bd *tx_buf,
3271 struct eth_tx_start_bd **tx_bd, u16 hlen,
3272 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003273{
3274 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3275 struct eth_tx_bd *d_tx_bd;
3276 dma_addr_t mapping;
3277 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3278
3279 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003280 h_tx_bd->nbytes = cpu_to_le16(hlen);
3281
Dmitry Kravkov91226792013-03-11 05:17:52 +00003282 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3283 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003284
3285 /* now get a new data BD
3286 * (after the pbd) and fill it */
3287 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003288 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003289
3290 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3291 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3292
3293 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3294 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3295 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3296
3297 /* this marks the BD as one that has no individual mapping */
3298 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3299
3300 DP(NETIF_MSG_TX_QUEUED,
3301 "TSO split data size is %d (%x:%x)\n",
3302 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3303
3304 /* update tx_bd */
3305 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3306
3307 return bd_prod;
3308}
3309
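/* Byte-swap helpers; the __force casts keep sparse happy about the
 * __le16/__le32 endianness annotations on the BD fields.
 */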
Yuval Mintz86564c32013-01-23 03:21:50 +00003310#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3311#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
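/**
 * bnx2x_csum_fix - fix up a checksum computed over a shifted window
 *
 * @t_header:	transport header the checksum should start from
 * @csum:	checksum as computed by the HW
 * @fix:	signed offset (in bytes) of the HW window from @t_header
 *
 * Subtracts the partial sum of the @fix bytes preceding @t_header
 * (fix > 0), or adds the partial sum of the -@fix bytes starting at
 * @t_header (fix < 0), and returns the folded, byte-swapped result.
 */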
Dmitry Kravkov91226792013-03-11 05:17:52 +00003312static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003313{
Yuval Mintz86564c32013-01-23 03:21:50 +00003314 __sum16 tsum = (__force __sum16) csum;
3315
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003316 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003317 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3318 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003319
3320 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003321 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3322 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003323
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003324 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003325}
3326
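/**
 * bnx2x_xmit_type - classify an skb for transmission
 *
 * @bp:		driver handle
 * @skb:	packet skb
 *
 * Returns a bitmask of XMIT_* flags describing the checksum offload
 * (V4/V6, TCP/UDP, possibly encapsulated) and GSO work the HW and FW
 * must perform for this packet.
 */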
Dmitry Kravkov91226792013-03-11 05:17:52 +00003327static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003328{
3329 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003330 __u8 prot = 0;
3331 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003332
3333 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003334 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003335
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003336 protocol = vlan_get_protocol(skb);
3337 if (protocol == htons(ETH_P_IPV6)) {
3338 rc = XMIT_CSUM_V6;
3339 prot = ipv6_hdr(skb)->nexthdr;
3340 } else {
3341 rc = XMIT_CSUM_V4;
3342 prot = ip_hdr(skb)->protocol;
3343 }
3344
3345 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3346 if (inner_ip_hdr(skb)->version == 6) {
3347 rc |= XMIT_CSUM_ENC_V6;
3348 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003349 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003350 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003351 rc |= XMIT_CSUM_ENC_V4;
3352 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003353 rc |= XMIT_CSUM_TCP;
3354 }
3355 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003356 if (prot == IPPROTO_TCP)
3357 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003358
Eric Dumazet36a8f392013-09-29 01:21:32 -07003359 if (skb_is_gso(skb)) {
3360 if (skb_is_gso_v6(skb)) {
3361 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3362 if (rc & XMIT_CSUM_ENC)
3363 rc |= XMIT_GSO_ENC_V6;
3364 } else {
3365 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3366 if (rc & XMIT_CSUM_ENC)
3367 rc |= XMIT_GSO_ENC_V4;
3368 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003369 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003370
3371 return rc;
3372}
3373
3374#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3375/* Check if the packet requires linearization (the packet is too
3376   fragmented). No need to check fragmentation if the page size > 8K
3377   (there will be no violation of FW restrictions). */
3378static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3379 u32 xmit_type)
3380{
3381 int to_copy = 0;
3382 int hlen = 0;
3383 int first_bd_sz = 0;
3384
3385 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3386 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3387
3388 if (xmit_type & XMIT_GSO) {
3389 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3390 /* Check if LSO packet needs to be copied:
3391 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3392 int wnd_size = MAX_FETCH_BD - 3;
3393 /* Number of windows to check */
3394 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3395 int wnd_idx = 0;
3396 int frag_idx = 0;
3397 u32 wnd_sum = 0;
3398
3399 /* Headers length */
3400 hlen = (int)(skb_transport_header(skb) - skb->data) +
3401 tcp_hdrlen(skb);
3402
3403		/* Amount of data (w/o headers) on linear part of SKB */
3404 first_bd_sz = skb_headlen(skb) - hlen;
3405
3406 wnd_sum = first_bd_sz;
3407
3408 /* Calculate the first sum - it's special */
3409 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3410 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003411 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003412
3413 /* If there was data on linear skb data - check it */
3414 if (first_bd_sz > 0) {
3415 if (unlikely(wnd_sum < lso_mss)) {
3416 to_copy = 1;
3417 goto exit_lbl;
3418 }
3419
3420 wnd_sum -= first_bd_sz;
3421 }
3422
3423 /* Others are easier: run through the frag list and
3424 check all windows */
3425 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3426 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003427 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003428
3429 if (unlikely(wnd_sum < lso_mss)) {
3430 to_copy = 1;
3431 break;
3432 }
3433 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003434 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003435 }
3436 } else {
3437			/* in the non-LSO case, a too fragmented packet
3438			   should always be linearized */
3439 to_copy = 1;
3440 }
3441 }
3442
3443exit_lbl:
3444 if (unlikely(to_copy))
3445 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003446 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003447 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3448 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3449
3450 return to_copy;
3451}
3452#endif
3453
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003454/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003455 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003456 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003457 * @skb: packet skb
3458 * @pbd: parse BD
3459 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003460 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003461static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3462 struct eth_tx_parse_bd_e1x *pbd,
3463 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003464{
3465 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003466 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003467 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003468
3469 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003470 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003471 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003472 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3473 ip_hdr(skb)->daddr,
3474 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003475 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003476 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003477 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3478 &ipv6_hdr(skb)->daddr,
3479 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003480 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003481
Yuval Mintz86564c32013-01-23 03:21:50 +00003482 pbd->global_data |=
3483 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003484}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003485
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003486/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003487 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3488 *
3489 * @bp: driver handle
3490 * @skb: packet skb
3491 * @parsing_data: data to be updated
3492 * @xmit_type: xmit flags
3493 *
3494 * 57712/578xx related, when skb has encapsulation
3495 */
3496static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3497 u32 *parsing_data, u32 xmit_type)
3498{
3499 *parsing_data |=
3500 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3501 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3502 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3503
3504 if (xmit_type & XMIT_CSUM_TCP) {
3505 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3506 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3507 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3508
3509 return skb_inner_transport_header(skb) +
3510 inner_tcp_hdrlen(skb) - skb->data;
3511 }
3512
3513 /* We support checksum offload for TCP and UDP only.
3514 * No need to pass the UDP header length - it's a constant.
3515 */
3516 return skb_inner_transport_header(skb) +
3517 sizeof(struct udphdr) - skb->data;
3518}
3519
3520/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003521 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003522 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003523 * @bp: driver handle
3524 * @skb: packet skb
3525 * @parsing_data: data to be updated
3526 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003527 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003528 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003529 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003530static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3531 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003532{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003533 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003534 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003535 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3536 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003537
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003538 if (xmit_type & XMIT_CSUM_TCP) {
3539 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3540 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3541 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003542
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003543 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003544 }
3545 /* We support checksum offload for TCP and UDP only.
3546 * No need to pass the UDP header length - it's a constant.
3547 */
3548 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003549}
3550
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003551/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003552static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3553 struct eth_tx_start_bd *tx_start_bd,
3554 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003555{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003556 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3557
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003558 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003559 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003560
3561 if (!(xmit_type & XMIT_CSUM_TCP))
3562 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003563}
3564
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003565/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003566 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003567 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003568 * @bp: driver handle
3569 * @skb: packet skb
3570 * @pbd: parse BD to be updated
3571 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003572 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003573static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3574 struct eth_tx_parse_bd_e1x *pbd,
3575 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003576{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003577 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003578
3579 /* for now NS flag is not used in Linux */
3580 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003581 cpu_to_le16(hlen |
3582 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3583 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003584
3585 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003586 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003587
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003588 hlen += pbd->ip_hlen_w;
3589
3590 /* We support checksum offload for TCP and UDP only */
3591 if (xmit_type & XMIT_CSUM_TCP)
3592 hlen += tcp_hdrlen(skb) / 2;
3593 else
3594 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003595
3596 pbd->total_hlen_w = cpu_to_le16(hlen);
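	/* hlen was accumulated in 16-bit words; convert back to bytes */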
3597 hlen = hlen*2;
3598
3599 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003600 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003601
3602 } else {
3603 s8 fix = SKB_CS_OFF(skb); /* signed! */
3604
3605 DP(NETIF_MSG_TX_QUEUED,
3606 "hlen %d fix %d csum before fix %x\n",
3607 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3608
3609 /* HW bug: fixup the CSUM */
3610 pbd->tcp_pseudo_csum =
3611 bnx2x_csum_fix(skb_transport_header(skb),
3612 SKB_CS(skb), fix);
3613
3614 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3615 pbd->tcp_pseudo_csum);
3616 }
3617
3618 return hlen;
3619}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003620
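/**
 * bnx2x_update_pbds_gso_enc - update parsing BDs for a tunneled GSO packet
 *
 * @skb:	packet skb
 * @pbd_e2:	parse BD (57712/578xx chips)
 * @pbd2:	second parse BD
 * @global_data:	flags to be updated with the outer header info
 * @xmit_type:	xmit flags
 *
 * Fills in the outer and inner IP header lengths, pseudo checksums and
 * TCP sequence data the FW needs in order to segment an encapsulated
 * packet.
 */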
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003621static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3622 struct eth_tx_parse_bd_e2 *pbd_e2,
3623 struct eth_tx_parse_2nd_bd *pbd2,
3624 u16 *global_data,
3625 u32 xmit_type)
3626{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003627 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003628 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003629
Dmitry Kravkove287a752013-03-21 15:38:24 +00003630 /* from outer IP to transport */
3631 hlen_w = (skb_inner_transport_header(skb) -
3632 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003633
3634 /* transport len */
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003635 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003636
Dmitry Kravkove287a752013-03-21 15:38:24 +00003637 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003638
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003639 /* outer IP header info */
3640 if (xmit_type & XMIT_CSUM_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003641 struct iphdr *iph = ip_hdr(skb);
Dmitry Kravkov1b4fc0e2013-07-11 15:48:21 +03003642 u32 csum = (__force u32)(~iph->check) -
3643 (__force u32)iph->tot_len -
3644 (__force u32)iph->frag_off;
Yuval Mintzc957d092013-06-25 08:50:11 +03003645
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003646 outerip_len = iph->ihl << 1;
3647
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003648 pbd2->fw_ip_csum_wo_len_flags_frag =
Yuval Mintzc957d092013-06-25 08:50:11 +03003649 bswab16(csum_fold((__force __wsum)csum));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003650 } else {
3651 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003652 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003653 pbd_e2->data.tunnel_data.flags |=
Dmitry Kravkov05f84612014-08-28 16:54:24 +03003654 ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003655 }
3656
3657 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3658
3659 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3660
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003661 /* inner IP header info */
3662 if (xmit_type & XMIT_CSUM_ENC_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003663 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003664
3665 pbd_e2->data.tunnel_data.pseudo_csum =
3666 bswab16(~csum_tcpudp_magic(
3667 inner_ip_hdr(skb)->saddr,
3668 inner_ip_hdr(skb)->daddr,
3669 0, IPPROTO_TCP, 0));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003670 } else {
3671 pbd_e2->data.tunnel_data.pseudo_csum =
3672 bswab16(~csum_ipv6_magic(
3673 &inner_ipv6_hdr(skb)->saddr,
3674 &inner_ipv6_hdr(skb)->daddr,
3675 0, IPPROTO_TCP, 0));
3676 }
3677
3678 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3679
3680 *global_data |=
3681 outerip_off |
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003682 (outerip_len <<
3683 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3684 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3685 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003686
3687 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3688 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3689 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3690 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003691}
3692
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003693static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3694 u32 xmit_type)
3695{
3696 struct ipv6hdr *ipv6;
3697
3698 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3699 return;
3700
3701 if (xmit_type & XMIT_GSO_ENC_V6)
3702 ipv6 = inner_ipv6_hdr(skb);
3703 else /* XMIT_GSO_V6 */
3704 ipv6 = ipv6_hdr(skb);
3705
3706 if (ipv6->nexthdr == NEXTHDR_IPV6)
3707 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3708}
3709
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));

	txdata = &bp->bnx2x_txq[txq_index];

	/* enable this debug print to view the transmission queue being used
	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* enable this debug print to view the transmission details
	DP(NETIF_MSG_TX_QUEUED,
	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     skb_shinfo(skb)->nr_frags +
		     BDS_PER_TX_PKT +
		     NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
		/* Handle special storage cases separately */
		if (txdata->tx_ring_size == 0) {
			struct bnx2x_eth_q_stats *q_stats =
				bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED,
	   "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
	   skb->len);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED,
			   "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED,
		   "SKB mapping failed - silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
		} else if (bp->ptp_tx_skb) {
			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
		} else {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* schedule check for Tx timestamp */
			bp->ptp_tx_skb = skb_get(skb);
			bp->ptp_tx_start = jiffies;
			schedule_work(&bp->ptp_task);
		}
	}

	/* header nbd: indirectly zero other flags! */
	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
			cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
			(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else {
		/* when transmitting in a vf, start bd must hold the ethertype
		 * for fw to enforce it
		 */
#ifndef BNX2X_STOP_ON_ERROR
		if (IS_VF(bp))
#endif
			tx_start_bd->vlan_or_ethertype =
				cpu_to_le16(ntohs(eth->h_proto));
#ifndef BNX2X_STOP_ON_ERROR
		else
			/* used by FW for packet accounting */
			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
#endif
	}

	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));

		if (xmit_type & XMIT_CSUM_ENC) {
			u16 global_data = 0;

			/* Set PBD in enc checksum offload case */
			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
						      &pbd_e2_parsing_data,
						      xmit_type);

			/* turn on 2nd parsing and get a BD */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;

			memset(pbd2, 0, sizeof(*pbd2));

			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
				(skb_inner_network_header(skb) -
				 skb->data) >> 1;

			if (xmit_type & XMIT_GSO_ENC)
				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
							  &global_data,
							  xmit_type);

			pbd2->global_data = cpu_to_le16(global_data);

			/* add an additional parsing BD indication to the start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_PARSE_NBDS, 1);
			/* set encapsulation flag in start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_TUNNEL_EXIST, 1);

			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;

			nbd++;
		} else if (xmit_type & XMIT_CSUM) {
			/* Set PBD in checksum offload case w/o encapsulation */
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		}

		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
		/* Add the macs to the parsing BD if this is a vf or if
		 * Tx Switching is enabled.
		 */
		if (IS_VF(bp)) {
			/* override GRE parameters in BD */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);

			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
					      &pbd_e2->data.mac_addr.dst_mid,
					      &pbd_e2->data.mac_addr.dst_lo,
					      eth->h_dest);
		} else {
			if (bp->flags & TX_SWITCHING)
				bnx2x_set_fw_mac_addr(
						&pbd_e2->data.mac_addr.dst_hi,
						&pbd_e2->data.mac_addr.dst_mid,
						&pbd_e2->data.mac_addr.dst_lo,
						eth->h_dest);
#ifdef BNX2X_STOP_ON_ERROR
			/* Enforce security is always set in Stop on Error -
			 * source mac should be present in the parsing BD
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);
#endif
		}

		SET_FLAG(pbd_e2_parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
	} else {
		u16 global_data = 0;
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
		pbd_e1x->global_data |= cpu_to_le16(global_data);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED,
	   "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen)) {
			nbd++;
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod);
		}
		if (!CHIP_IS_E1x(bp))
			pbd_e2_parsing_data |=
				(skb_shinfo(skb)->gso_size <<
				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
				ETH_TX_PARSE_BD_E2_LSO_MSS;
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd needs to be properly updated
			 * before the call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, when we care much more about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2,
		   pbd_e2->data.mac_addr.dst_hi,
		   pbd_e2->data.mac_addr.dst_mid,
		   pbd_e2->data.mac_addr.dst_lo,
		   pbd_e2->data.mac_addr.src_hi,
		   pbd_e2->data.mac_addr.src_mid,
		   pbd_e2->data.mac_addr.src_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}

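/* Illustrative sketch (not part of the driver): the worst-case BD budget
 * bnx2x_start_xmit() reserves before accepting an skb. The names mirror
 * the availability check at the top of that function; the helper itself
 * is hypothetical and compiled out via #if 0.
 */
#if 0
static int bnx2x_example_tx_bds_needed(struct sk_buff *skb)
{
	/* one BD per fragment, plus the start/parsing BDs every packet
	 * carries, plus the "next page" BDs the packet may cross at
	 * ring-page boundaries
	 */
	return skb_shinfo(skb)->nr_frags +
	       BDS_PER_TX_PKT +
	       NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT);
}
#endif
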
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @dev: net device to configure
 * @num_tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. Aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}

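/* Illustrative usage sketch (not part of the driver): bnx2x_setup_tc()
 * asserts rtnl, so a standalone caller must take the lock first; ndo
 * callbacks already run under it. The helper below is hypothetical and
 * compiled out via #if 0.
 */
#if 0
static int bnx2x_example_enable_three_tcs(struct net_device *dev)
{
	int rc;

	rtnl_lock();
	rc = bnx2x_setup_tc(dev, 3);	/* spread prios over 3 classes */
	rtnl_unlock();
	return rc;
}
#endif
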
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
	    !is_zero_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}

static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;
	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;
	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

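/* Illustrative sketch (not part of the driver): the caller contract of
 * bnx2x_alloc_rx_bds() - it returns how many BDs were actually seeded,
 * and bnx2x_alloc_fp_mem_at() below treats a short count as an error.
 * The helper is hypothetical and compiled out via #if 0.
 */
#if 0
static int bnx2x_example_fill_rx_ring(struct bnx2x_fastpath *fp, int want)
{
	int got = bnx2x_alloc_rx_bds(fp, want);

	/* a partial allocation is reported, not hidden */
	return (got < want) ? -ENOMEM : 0;
}
#endif
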
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

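/* Illustrative sketch (not part of the driver): the address arithmetic
 * used just above. The last CQE of page i points at page (i % NUM_RCQ_RINGS),
 * so the final page links back to page 0 and the completion queue forms
 * a ring. Compiled out via #if 0.
 */
#if 0
static dma_addr_t bnx2x_example_rcq_next_page(struct bnx2x_fastpath *fp, int i)
{
	/* i runs 1..NUM_RCQ_RINGS; the modulo wraps the last page to 0 */
	return fp->rx_comp_mapping + BCM_PAGE_SIZE * (i % NUM_RCQ_RINGS);
}
#endif
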
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough.
	 * In these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}

static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is the maximum number of fast
	 * path IGU SBs plus the default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected, activated PHY is always reported after swapping (when
	 * PHY swapping is enabled), so when swapping is enabled we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

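/* Illustrative sketch (not part of the driver): the bounds enforced by
 * bnx2x_change_mtu() above, restated as a predicate. Compiled out via
 * #if 0; the helper name is hypothetical.
 */
#if 0
static bool bnx2x_example_mtu_valid(int new_mtu)
{
	/* upper bound is the jumbo limit; the lower bound accounts for
	 * the Ethernet header when comparing against the minimum frame
	 * size
	 */
	return new_mtu <= ETH_MAX_JUMBO_PACKET_SIZE &&
	       (new_mtu + ETH_HLEN) >= ETH_MIN_PACKET_SIZE;
}
#endif
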
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	/* Note: do not disable SW GRO in kernel when HW GRO is off */
	if (bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

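/* Illustrative sketch (not part of the driver): the change-detection
 * idiom used by bnx2x_set_features() above - XOR the old and new flag
 * words, then mask out GRO transitions that do not require reloading
 * the NIC. Compiled out via #if 0; the helper name is hypothetical.
 */
#if 0
static bool bnx2x_example_needs_reload(struct bnx2x *bp, u32 new_flags)
{
	u32 changes = new_flags ^ bp->flags;

	/* a GRO toggle is absorbed in software while LRO stays on */
	if ((changes & GRO_ENABLE_FLAG) && (new_flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	return changes != 0;
}
#endif
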
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shut down gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

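/* Illustrative sketch (not part of the driver): the usec-to-ticks
 * conversion and implicit-disable rule used by
 * bnx2x_update_coalesce_sb_index() above. Note that usec == 0 disables
 * the index even when 'disable' was not requested. Compiled out via
 * #if 0; the helper name is hypothetical.
 */
#if 0
static u8 bnx2x_example_coalesce_ticks(u16 usec, u8 *disable)
{
	*disable = *disable ? 1 : (usec ? 0 : 1);	/* 0 usec => disable */
	return usec / BNX2X_BTR;			/* HW counts in BTR ticks */
}
#endif
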
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);