Yuval Mintz4ad79e12015-07-22 09:16:23 +03001/* bnx2x_cmn.c: QLogic Everest network driver.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002 *
Yuval Mintz247fa822013-01-14 05:11:50 +00003 * Copyright (c) 2007-2013 Broadcom Corporation
Yuval Mintz4ad79e12015-07-22 09:16:23 +03004 * Copyright (c) 2014 QLogic Corporation
5 * All rights reserved
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
Ariel Elior08f6dd82014-05-27 13:11:36 +030011 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000012 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
17 *
18 */
19
Joe Perchesf1deab52011-08-14 12:16:21 +000020#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000022#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000023#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000024#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000025#include <linux/ip.h>
Amir Vadaic9931892014-08-25 16:06:54 +030026#include <linux/crash_dump.h>
Yuval Mintz99690852013-01-14 05:11:49 +000027#include <net/tcp.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000028#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070029#include <net/ip6_checksum.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030030#include <net/busy_poll.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000031#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000032#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000033#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000034#include "bnx2x_sp.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000035
stephen hemmingera8f47eb2014-01-09 22:20:11 -080036static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42{
43 int i;
44
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
stephen hemmingera8f47eb2014-01-09 22:20:11 -080049 }
50}
51
52static void bnx2x_add_all_napi(struct bnx2x *bp)
53{
54 int i;
55
56 /* Add NAPI objects */
57 for_each_eth_queue(bp, i) {
58 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59 bnx2x_poll, NAPI_POLL_WEIGHT);
stephen hemmingera8f47eb2014-01-09 22:20:11 -080060 }
61}
62
63static int bnx2x_calc_num_queues(struct bnx2x *bp)
64{
Michal Schmidt7d0445d2014-02-25 16:04:24 +010065 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
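	/* Note (assumption): bnx2x_num_queues is presumed to come from the
	 * driver's num_queues module parameter; 0 (unset) falls back to the
	 * kernel's default RSS queue count, and the result is clamped below
	 * to the range the HW supports.
	 */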
Michal Schmidtff2ad302014-02-25 16:04:25 +010066
67 /* Reduce memory usage in kdump environment by using only one queue */
Amir Vadaic9931892014-08-25 16:06:54 +030068 if (is_kdump_kernel())
Michal Schmidtff2ad302014-02-25 16:04:25 +010069 nq = 1;
70
Michal Schmidt7d0445d2014-02-25 16:04:24 +010071 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72 return nq;
stephen hemmingera8f47eb2014-01-09 22:20:11 -080073}
74
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000075/**
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000076 * bnx2x_move_fp - move content of the fastpath structure.
77 *
78 * @bp: driver handle
79 * @from: source FP index
80 * @to: destination FP index
81 *
 82 * Makes sure the contents of bp->fp[to].napi are kept
Ariel Elior72754082011-11-13 04:34:31 +000083 * intact. This is done by first copying the napi struct from
 84 * the target to the source, and then memcpying the entire
Merav Sicron65565882012-06-19 07:48:26 +000085 * source onto the target. Update txdata pointers and related
 86 * content.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000087 */
88static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89{
90 struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 struct bnx2x_fastpath *to_fp = &bp->fp[to];
Barak Witkowski15192a82012-06-19 07:48:28 +000092 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
Merav Sicron65565882012-06-19 07:48:26 +000096 int old_max_eth_txqs, new_max_eth_txqs;
97 int old_txdata_index = 0, new_txdata_index = 0;
Yuval Mintz34d56262013-08-28 01:13:01 +030098 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
Ariel Elior72754082011-11-13 04:34:31 +000099
 100 /* Copy the NAPI object as it has already been initialized */
101 from_fp->napi = to_fp->napi;
102
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000103 /* Move bnx2x_fastpath contents */
104 memcpy(to_fp, from_fp, sizeof(*to_fp));
105 to_fp->index = to;
Merav Sicron65565882012-06-19 07:48:26 +0000106
Yuval Mintz34d56262013-08-28 01:13:01 +0300107 /* Retain the tpa_info of the original `to' version as we don't want
108 * 2 FPs to contain the same tpa_info pointer.
109 */
110 to_fp->tpa_info = old_tpa_info;
111
Barak Witkowski15192a82012-06-19 07:48:28 +0000112 /* move sp_objs contents as well, as their indices match fp ones */
113 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115 /* move fp_stats contents as well, as their indices match fp ones */
116 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117
Merav Sicron65565882012-06-19 07:48:26 +0000118 /* Update txdata pointers in fp and move txdata content accordingly:
119 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 * decremented by max_cos x delta.
121 */
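	/* Worked example (hypothetical values): with BNX2X_NUM_ETH_QUEUES = 8
	 * and max_cos = 3, a move with from = 7 and to = 5 gives
	 * old_max_eth_txqs = 8 * 3 = 24 and new_max_eth_txqs = (8 - 7 + 5) * 3
	 * = 18, i.e. the FCoE txdata slot is pulled back by max_cos * delta = 6.
	 */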
122
123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 (bp)->max_cos;
126 if (from == FCOE_IDX(bp)) {
127 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129 }
130
Yuval Mintz4864a162013-01-10 04:53:39 +0000131 memcpy(&bp->bnx2x_txq[new_txdata_index],
132 &bp->bnx2x_txq[old_txdata_index],
Merav Sicron65565882012-06-19 07:48:26 +0000133 sizeof(struct bnx2x_fp_txdata));
134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000135}
136
Ariel Elior8ca5e172013-01-01 05:22:34 +0000137/**
138 * bnx2x_fill_fw_str - Fill buffer with FW version string.
139 *
140 * @bp: driver handle
141 * @buf: character buffer to fill with the fw name
142 * @buf_len: length of the above buffer
143 *
144 */
145void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146{
147 if (IS_PF(bp)) {
148 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150 phy_fw_ver[0] = '\0';
151 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 phy_fw_ver, PHY_FW_VER_LEN);
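		/* Illustrative result of the string built below (hypothetical
		 * versions): "7.13.1 bc 7.13.1 phy 1.34" - the storm FW version
		 * followed by the bootcode version and, when present, the
		 * external PHY FW version.
		 */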
153 strlcpy(buf, bp->fw_ver, buf_len);
154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 "bc %d.%d.%d%s%s",
156 (bp->common.bc_ver & 0xff0000) >> 16,
157 (bp->common.bc_ver & 0xff00) >> 8,
158 (bp->common.bc_ver & 0xff),
159 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 } else {
Ariel Elior64112802013-01-07 00:50:23 +0000161 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
Ariel Elior8ca5e172013-01-01 05:22:34 +0000162 }
163}
164
David S. Miller4b87f922013-01-15 15:05:59 -0500165/**
Yuval Mintz4864a162013-01-10 04:53:39 +0000166 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167 *
168 * @bp: driver handle
169 * @delta: number of eth queues which were not allocated
170 */
171static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172{
173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174
 175 /* Queue pointer cannot be re-set on an fp-basis, as moving the pointer
Yuval Mintz16a5fd92013-06-02 00:06:18 +0000176 * backward along the array could cause memory to be overwritten
Yuval Mintz4864a162013-01-10 04:53:39 +0000177 */
178 for (cos = 1; cos < bp->max_cos; cos++) {
179 for (i = 0; i < old_eth_num - delta; i++) {
180 struct bnx2x_fastpath *fp = &bp->fp[i];
181 int new_idx = cos * (old_eth_num - delta) + i;
182
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 sizeof(struct bnx2x_fp_txdata));
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186 }
187 }
188}
189
stephen hemmingera8f47eb2014-01-09 22:20:11 -0800190int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300191
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000192/* free skb in the packet ring at pos idx
193 * return idx of last bd freed
194 */
Ariel Elior6383c0b2011-07-14 08:31:57 +0000195static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +0000196 u16 idx, unsigned int *pkts_compl,
197 unsigned int *bytes_compl)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000198{
Ariel Elior6383c0b2011-07-14 08:31:57 +0000199 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000200 struct eth_tx_start_bd *tx_start_bd;
201 struct eth_tx_bd *tx_data_bd;
202 struct sk_buff *skb = tx_buf->skb;
203 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 int nbd;
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100205 u16 split_bd_len = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000206
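	/* Rough overview of the BD chain walked below: a start BD, one or
	 * two parse BDs, an optional split BD holding the TSO header, and
	 * then one data BD per fragment.
	 */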
 207 /* prefetch skb end pointer to speed up dev_kfree_skb() */
208 prefetch(&skb->end);
209
Merav Sicron51c1a582012-03-18 10:33:38 +0000210 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000211 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000212
Ariel Elior6383c0b2011-07-14 08:31:57 +0000213 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000214
215 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216#ifdef BNX2X_STOP_ON_ERROR
217 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 BNX2X_ERR("BAD nbd!\n");
219 bnx2x_panic();
220 }
221#endif
222 new_cons = nbd + tx_buf->first_bd;
223
224 /* Get the next bd */
225 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227 /* Skip a parse bd... */
228 --nbd;
229 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
Dmitry Kravkovfe26566d2014-07-24 18:54:47 +0300231 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232 /* Skip second parse bd... */
233 --nbd;
234 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235 }
236
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100237 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000238 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100239 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000241 --nbd;
242 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243 }
244
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100245 /* unmap first bd */
246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248 DMA_TO_DEVICE);
249
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000250 /* now free frags */
251 while (nbd > 0) {
252
Ariel Elior6383c0b2011-07-14 08:31:57 +0000253 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 if (--nbd)
257 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258 }
259
260 /* release skb */
261 WARN_ON(!skb);
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000262 if (likely(skb)) {
Tom Herbert2df1a702011-11-28 16:33:37 +0000263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len;
Yuval Mintze1615902015-08-10 12:49:35 +0300265 dev_kfree_skb_any(skb);
Tom Herbert2df1a702011-11-28 16:33:37 +0000266 }
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000267
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL;
270
271 return new_cons;
272}
273
Ariel Elior6383c0b2011-07-14 08:31:57 +0000274int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000275{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000276 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000277 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Tom Herbert2df1a702011-11-28 16:33:37 +0000278 unsigned int pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000279
280#ifdef BNX2X_STOP_ON_ERROR
281 if (unlikely(bp->panic))
282 return -1;
283#endif
284
Ariel Elior6383c0b2011-07-14 08:31:57 +0000285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000288
Brian King408f14d2019-07-15 16:41:50 -0500289 /* Ensure subsequent loads occur after hw_cons */
290 smp_rmb();
291
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000292 while (sw_cons != hw_cons) {
293 u16 pkt_cons;
294
295 pkt_cons = TX_BD(sw_cons);
296
Merav Sicron51c1a582012-03-18 10:33:38 +0000297 DP(NETIF_MSG_TX_DONE,
298 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000299 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000300
Tom Herbert2df1a702011-11-28 16:33:37 +0000301 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
Yuval Mintz2de67432013-01-23 03:21:43 +0000302 &pkts_compl, &bytes_compl);
Tom Herbert2df1a702011-11-28 16:33:37 +0000303
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000304 sw_cons++;
305 }
306
Tom Herbert2df1a702011-11-28 16:33:37 +0000307 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
308
Ariel Elior6383c0b2011-07-14 08:31:57 +0000309 txdata->tx_pkt_cons = sw_cons;
310 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000311
312 /* Need to make the tx_bd_cons update visible to start_xmit()
313 * before checking for netif_tx_queue_stopped(). Without the
314 * memory barrier, there is a small possibility that
315 * start_xmit() will miss it and cause the queue to be stopped
316 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300317 * On the other hand we need an rmb() here to ensure the proper
318 * ordering of bit testing in the following
319 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000320 */
321 smp_mb();
322
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000323 if (unlikely(netif_tx_queue_stopped(txq))) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +0000324 /* Taking tx_lock() is needed to prevent re-enabling the queue
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000325 * while it's empty. This could happen if rx_action() gets
326 * suspended in bnx2x_tx_int() after the condition before
327 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
328 *
329 * stops the queue->sees fresh tx_bd_cons->releases the queue->
330 * sends some packets consuming the whole queue again->
331 * stops the queue
332 */
333
334 __netif_tx_lock(txq, smp_processor_id());
335
336 if ((netif_tx_queue_stopped(txq)) &&
337 (bp->state == BNX2X_STATE_OPEN) &&
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +0000338 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000339 netif_tx_wake_queue(txq);
340
341 __netif_tx_unlock(txq);
342 }
343 return 0;
344}
345
346static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
347 u16 idx)
348{
349 u16 last_max = fp->last_max_sge;
350
351 if (SUB_S16(idx, last_max) > 0)
352 fp->last_max_sge = idx;
353}
354
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000355static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
356 u16 sge_len,
357 struct eth_end_agg_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000358{
359 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000360 u16 last_max, last_elem, first_elem;
361 u16 delta = 0;
362 u16 i;
363
364 if (!sge_len)
365 return;
366
367 /* First mark all used pages */
368 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300369 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000370 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000371
372 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000373 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000374
375 /* Here we assume that the last SGE index is the biggest */
376 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000377 bnx2x_update_last_max_sge(fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000378 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000379
380 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300381 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
382 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000383
384 /* If ring is not full */
385 if (last_elem + 1 != first_elem)
386 last_elem++;
387
388 /* Now update the prod */
389 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
390 if (likely(fp->sge_mask[i]))
391 break;
392
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300393 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
394 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000395 }
396
397 if (delta > 0) {
398 fp->rx_sge_prod += delta;
399 /* clear page-end entries */
400 bnx2x_clear_sge_mask_next_elems(fp);
401 }
402
403 DP(NETIF_MSG_RX_STATUS,
404 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
405 fp->last_max_sge, fp->rx_sge_prod);
406}
407
Yuval Mintz2de67432013-01-23 03:21:43 +0000408/* Get Toeplitz hash value in the skb using the value from the
Eric Dumazete52fcb22011-11-14 06:05:34 +0000409 * CQE (calculated by HW).
410 */
411static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000412 const struct eth_fast_path_rx_cqe *cqe,
Tom Herbert5495ab72013-12-19 08:59:08 -0800413 enum pkt_hash_types *rxhash_type)
Eric Dumazete52fcb22011-11-14 06:05:34 +0000414{
Yuval Mintz2de67432013-01-23 03:21:43 +0000415 /* Get Toeplitz hash from CQE */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000416 if ((bp->dev->features & NETIF_F_RXHASH) &&
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000417 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
418 enum eth_rss_hash_type htype;
419
420 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
Tom Herbert5495ab72013-12-19 08:59:08 -0800421 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
422 (htype == TCP_IPV6_HASH_TYPE)) ?
423 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
424
Eric Dumazete52fcb22011-11-14 06:05:34 +0000425 return le32_to_cpu(cqe->rss_hash_result);
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000426 }
Tom Herbert5495ab72013-12-19 08:59:08 -0800427 *rxhash_type = PKT_HASH_TYPE_NONE;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000428 return 0;
429}
430
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000431static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000432 u16 cons, u16 prod,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300433 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000434{
435 struct bnx2x *bp = fp->bp;
436 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
437 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
438 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
439 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300440 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
441 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000442
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300443 /* print error if current state != stop */
444 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000445 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
446
Eric Dumazete52fcb22011-11-14 06:05:34 +0000447 /* Try to map an empty data buffer from the aggregation info */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300448 mapping = dma_map_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000449 first_buf->data + NET_SKB_PAD,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300450 fp->rx_buf_size, DMA_FROM_DEVICE);
451 /*
452 * ...if it fails - move the skb from the consumer to the producer
453 * and set the current aggregation state as ERROR to drop it
454 * when TPA_STOP arrives.
455 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000456
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300457 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
458 /* Move the BD from the consumer to the producer */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000459 bnx2x_reuse_rx_data(fp, cons, prod);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300460 tpa_info->tpa_state = BNX2X_TPA_ERROR;
461 return;
462 }
463
Eric Dumazete52fcb22011-11-14 06:05:34 +0000464 /* move empty data from pool to prod */
465 prod_rx_buf->data = first_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300466 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000467 /* point prod_bd to new data */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000468 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
469 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
470
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300471 /* move partial skb from cons to pool (don't unmap yet) */
472 *first_buf = *cons_rx_buf;
473
474 /* mark bin state as START */
475 tpa_info->parsing_flags =
476 le16_to_cpu(cqe->pars_flags.flags);
477 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
478 tpa_info->tpa_state = BNX2X_TPA_START;
479 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
480 tpa_info->placement_offset = cqe->placement_offset;
Tom Herbert5495ab72013-12-19 08:59:08 -0800481 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000482 if (fp->mode == TPA_MODE_GRO) {
483 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
Yuval Mintz924d75a2013-01-23 03:21:44 +0000484 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000485 tpa_info->gro_size = gro_size;
486 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300487
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000488#ifdef BNX2X_STOP_ON_ERROR
489 fp->tpa_queue_used |= (1 << queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000490 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000491 fp->tpa_queue_used);
492#endif
493}
494
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000495/* Timestamp option length allowed for TPA aggregation:
496 *
497 * nop nop kind length echo val
498 */
499#define TPA_TSTAMP_OPT_LEN 12
500/**
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000501 * bnx2x_set_gro_params - compute GRO values
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000502 *
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000503 * @skb: packet skb
Dmitry Kravkove8920672011-05-04 23:52:40 +0000504 * @parsing_flags: parsing flags from the START CQE
505 * @len_on_bd: total length of the first packet for the
506 * aggregation.
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000507 * @pkt_len: length of all segments
Dmitry Kravkove8920672011-05-04 23:52:40 +0000508 *
 509 * Approximates the MSS for this aggregation using its first
 510 * packet, and computes the number of aggregated segments and
Yuval Mintz2de67432013-01-23 03:21:43 +0000511 * the gso_type.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000512 */
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000513static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
Yuval Mintzab5777d2013-03-11 05:17:47 +0000514 u16 len_on_bd, unsigned int pkt_len,
515 u16 num_of_coalesced_segs)
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000516{
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000517 /* A TPA aggregation won't carry IP options, TCP options other
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300518 * than the timestamp, or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000519 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300520 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
521
522 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000523 PRS_FLAG_OVERETH_IPV6) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300524 hdrs_len += sizeof(struct ipv6hdr);
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
526 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300527 hdrs_len += sizeof(struct iphdr);
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000528 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
529 }
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000530
 531 /* Check if there was a TCP timestamp; if there is one, it will
 532 * always be 12 bytes long: nop nop kind length echo val.
533 *
534 * Otherwise FW would close the aggregation.
535 */
536 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
537 hdrs_len += TPA_TSTAMP_OPT_LEN;
538
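	/* Worked example (hypothetical values): for an IPv4 aggregation with
	 * TCP timestamps and len_on_bd = 1514, hdrs_len = 14 (ETH) + 20 (IP) +
	 * 20 (TCP) + 12 (timestamp option) = 66, giving an approximated
	 * gso_size of 1448 below.
	 */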
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000539 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
540
541 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
542 * to skb_shinfo(skb)->gso_segs
543 */
Yuval Mintzab5777d2013-03-11 05:17:47 +0000544 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000545}
546
Michal Schmidt996dedb2013-09-05 22:13:09 +0200547static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
548 u16 index, gfp_t gfp_mask)
Eric Dumazet1191cb82012-04-27 21:39:21 +0000549{
Eric Dumazet1191cb82012-04-27 21:39:21 +0000550 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
551 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300552 struct bnx2x_alloc_pool *pool = &fp->page_pool;
Eric Dumazet1191cb82012-04-27 21:39:21 +0000553 dma_addr_t mapping;
554
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300555 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
556
 557 /* put the page reference held by the memory pool, since we
 558 * won't be using this page in the pool anymore.
559 */
560 if (pool->page)
561 put_page(pool->page);
562
563 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
Michal Schmidt5c9ffde2015-12-04 17:22:34 +0100564 if (unlikely(!pool->page))
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300565 return -ENOMEM;
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300566
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300567 pool->offset = 0;
Eric Dumazet1191cb82012-04-27 21:39:21 +0000568 }
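	/* Sketch of the pool behaviour (sizes are illustrative): each call
	 * hands out the next SGE_PAGE_SIZE slice of the pooled allocation and
	 * advances pool->offset, so e.g. a 64K allocation carved into 4K SGE
	 * buffers would serve 16 SGEs before alloc_pages() is needed again.
	 */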
569
Michal Schmidt80316122015-06-26 17:50:00 +0200570 mapping = dma_map_page(&bp->pdev->dev, pool->page,
571 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
572 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
573 BNX2X_ERR("Can't map sge\n");
574 return -ENOMEM;
575 }
576
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300577 get_page(pool->page);
578 sw_buf->page = pool->page;
579 sw_buf->offset = pool->offset;
Eric Dumazet1191cb82012-04-27 21:39:21 +0000580
Eric Dumazet1191cb82012-04-27 21:39:21 +0000581 dma_unmap_addr_set(sw_buf, mapping, mapping);
582
583 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
584 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
585
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300586 pool->offset += SGE_PAGE_SIZE;
587
Eric Dumazet1191cb82012-04-27 21:39:21 +0000588 return 0;
589}
590
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000591static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000592 struct bnx2x_agg_info *tpa_info,
593 u16 pages,
594 struct sk_buff *skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300595 struct eth_end_agg_rx_cqe *cqe,
596 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000597{
598 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000599 u32 i, frag_len, frag_size;
600 int err, j, frag_id = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300601 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000602 u16 full_page = 0, gro_size = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000603
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300604 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000605
606 if (fp->mode == TPA_MODE_GRO) {
607 gro_size = tpa_info->gro_size;
608 full_page = tpa_info->full_page;
609 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000610
611 /* This is needed in order to enable forwarding support */
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000612 if (frag_size)
613 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
Yuval Mintzab5777d2013-03-11 05:17:47 +0000614 le16_to_cpu(cqe->pkt_len),
615 le16_to_cpu(cqe->num_of_coalesced_segs));
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000616
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000617#ifdef BNX2X_STOP_ON_ERROR
Yuval Mintz924d75a2013-01-23 03:21:44 +0000618 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000619 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
620 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300621 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000622 bnx2x_panic();
623 return -EINVAL;
624 }
625#endif
626
627 /* Run through the SGL and compose the fragmented skb */
628 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300629 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000630
631 /* FW gives the indices of the SGE as if the ring is an array
632 (meaning that "next" element will consume 2 indices) */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000633 if (fp->mode == TPA_MODE_GRO)
634 frag_len = min_t(u32, frag_size, (u32)full_page);
635 else /* LRO */
Yuval Mintz924d75a2013-01-23 03:21:44 +0000636 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000637
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000638 rx_pg = &fp->rx_page_ring[sge_idx];
639 old_rx_pg = *rx_pg;
640
641 /* If we fail to allocate a substitute page, we simply stop
642 where we are and drop the whole packet */
Michal Schmidt996dedb2013-09-05 22:13:09 +0200643 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000644 if (unlikely(err)) {
Barak Witkowski15192a82012-06-19 07:48:28 +0000645 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000646 return err;
647 }
648
Michal Schmidt80316122015-06-26 17:50:00 +0200649 dma_unmap_page(&bp->pdev->dev,
650 dma_unmap_addr(&old_rx_pg, mapping),
651 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000652 /* Add one frag and update the appropriate fields in the skb */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000653 if (fp->mode == TPA_MODE_LRO)
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300654 skb_fill_page_desc(skb, j, old_rx_pg.page,
655 old_rx_pg.offset, frag_len);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000656 else { /* GRO */
657 int rem;
658 int offset = 0;
659 for (rem = frag_len; rem > 0; rem -= gro_size) {
660 int len = rem > gro_size ? gro_size : rem;
661 skb_fill_page_desc(skb, frag_id++,
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300662 old_rx_pg.page,
663 old_rx_pg.offset + offset,
664 len);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000665 if (offset)
666 get_page(old_rx_pg.page);
667 offset += len;
668 }
669 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000670
671 skb->data_len += frag_len;
Yuval Mintz924d75a2013-01-23 03:21:44 +0000672 skb->truesize += SGE_PAGES;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000673 skb->len += frag_len;
674
675 frag_size -= frag_len;
676 }
677
678 return 0;
679}
680
Eric Dumazetd46d1322012-12-10 12:16:06 +0000681static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
682{
683 if (fp->rx_frag_size)
Alexander Duycke51423d2015-05-06 21:12:31 -0700684 skb_free_frag(data);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000685 else
686 kfree(data);
687}
688
Michal Schmidt996dedb2013-09-05 22:13:09 +0200689static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
Eric Dumazetd46d1322012-12-10 12:16:06 +0000690{
Michal Schmidt996dedb2013-09-05 22:13:09 +0200691 if (fp->rx_frag_size) {
692 /* GFP_KERNEL allocations are used only during initialization */
Mel Gormand0164ad2015-11-06 16:28:21 -0800693 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
Michal Schmidt996dedb2013-09-05 22:13:09 +0200694 return (void *)__get_free_page(gfp_mask);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000695
Michal Schmidt996dedb2013-09-05 22:13:09 +0200696 return netdev_alloc_frag(fp->rx_frag_size);
697 }
698
699 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000700}
701
Yuval Mintz99690852013-01-14 05:11:49 +0000702#ifdef CONFIG_INET
703static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
704{
705 const struct iphdr *iph = ip_hdr(skb);
706 struct tcphdr *th;
707
708 skb_set_transport_header(skb, sizeof(struct iphdr));
709 th = tcp_hdr(skb);
710
711 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
712 iph->saddr, iph->daddr, 0);
713}
714
715static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
716{
717 struct ipv6hdr *iph = ipv6_hdr(skb);
718 struct tcphdr *th;
719
720 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
721 th = tcp_hdr(skb);
722
723 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
724 &iph->saddr, &iph->daddr, 0);
725}
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000726
727static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
728 void (*gro_func)(struct bnx2x*, struct sk_buff*))
729{
730 skb_set_network_header(skb, 0);
731 gro_func(bp, skb);
732 tcp_gro_complete(skb);
733}
Yuval Mintz99690852013-01-14 05:11:49 +0000734#endif
735
736static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
737 struct sk_buff *skb)
738{
739#ifdef CONFIG_INET
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000740 if (skb_shinfo(skb)->gso_size) {
Yuval Mintz99690852013-01-14 05:11:49 +0000741 switch (be16_to_cpu(skb->protocol)) {
742 case ETH_P_IP:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000743 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
Yuval Mintz99690852013-01-14 05:11:49 +0000744 break;
745 case ETH_P_IPV6:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000746 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
Yuval Mintz99690852013-01-14 05:11:49 +0000747 break;
748 default:
Michal Schmidt9adab1b2015-12-04 17:22:35 +0100749 WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
Yuval Mintz99690852013-01-14 05:11:49 +0000750 be16_to_cpu(skb->protocol));
751 }
Yuval Mintz99690852013-01-14 05:11:49 +0000752 }
753#endif
Eric Dumazet60e66fe2013-10-12 14:08:34 -0700754 skb_record_rx_queue(skb, fp->rx_queue);
Yuval Mintz99690852013-01-14 05:11:49 +0000755 napi_gro_receive(&fp->napi, skb);
756}
757
Eric Dumazet1191cb82012-04-27 21:39:21 +0000758static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
759 struct bnx2x_agg_info *tpa_info,
760 u16 pages,
761 struct eth_end_agg_rx_cqe *cqe,
762 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000763{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300764 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000765 u8 pad = tpa_info->placement_offset;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300766 u16 len = tpa_info->len_on_bd;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000767 struct sk_buff *skb = NULL;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000768 u8 *new_data, *data = rx_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300769 u8 old_tpa_state = tpa_info->tpa_state;
770
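	/* Rough flow of a TPA stop (as implemented below): allocate a
	 * replacement buffer, unmap the aggregation's first buffer, build an
	 * skb around it, attach the SGE pages as frags and hand the skb to
	 * GRO; if the replacement buffer or the skb cannot be allocated, the
	 * packet is dropped and the original buffer stays in the bin.
	 */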
771 tpa_info->tpa_state = BNX2X_TPA_STOP;
772
 773 /* If there was an error during the handling of the TPA_START -
774 * drop this aggregation.
775 */
776 if (old_tpa_state == BNX2X_TPA_ERROR)
777 goto drop;
778
Eric Dumazete52fcb22011-11-14 06:05:34 +0000779 /* Try to allocate the new data */
Michal Schmidt996dedb2013-09-05 22:13:09 +0200780 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000781 /* Unmap skb in the pool anyway, as we are going to change
782 pool entry status to BNX2X_TPA_STOP even if new skb allocation
783 fails. */
784 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800785 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000786 if (likely(new_data))
Eric Dumazetd46d1322012-12-10 12:16:06 +0000787 skb = build_skb(data, fp->rx_frag_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000788
Eric Dumazete52fcb22011-11-14 06:05:34 +0000789 if (likely(skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000790#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800791 if (pad + len > fp->rx_buf_size) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000792 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800793 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000794 bnx2x_panic();
795 return;
796 }
797#endif
798
Eric Dumazete52fcb22011-11-14 06:05:34 +0000799 skb_reserve(skb, pad + NET_SKB_PAD);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000800 skb_put(skb, len);
Tom Herbert5495ab72013-12-19 08:59:08 -0800801 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000802
803 skb->protocol = eth_type_trans(skb, bp->dev);
804 skb->ip_summed = CHECKSUM_UNNECESSARY;
805
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000806 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
807 skb, cqe, cqe_idx)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300808 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
Patrick McHardy86a9bad2013-04-19 02:04:30 +0000809 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
Yuval Mintz99690852013-01-14 05:11:49 +0000810 bnx2x_gro_receive(bp, fp, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000811 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +0000812 DP(NETIF_MSG_RX_STATUS,
813 "Failed to allocate new pages - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000814 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000815 }
816
Eric Dumazete52fcb22011-11-14 06:05:34 +0000817 /* put new data in bin */
818 rx_buf->data = new_data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000819
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300820 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000821 }
Eric Dumazet07b0f002014-06-26 00:44:02 -0700822 if (new_data)
823 bnx2x_frag_free(fp, new_data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300824drop:
825 /* drop the packet and keep the buffer in the bin */
826 DP(NETIF_MSG_RX_STATUS,
827 "Failed to allocate or map a new skb - dropping packet!\n");
Barak Witkowski15192a82012-06-19 07:48:28 +0000828 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000829}
830
Michal Schmidt996dedb2013-09-05 22:13:09 +0200831static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
832 u16 index, gfp_t gfp_mask)
Eric Dumazet1191cb82012-04-27 21:39:21 +0000833{
834 u8 *data;
835 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
836 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
837 dma_addr_t mapping;
838
Michal Schmidt996dedb2013-09-05 22:13:09 +0200839 data = bnx2x_frag_alloc(fp, gfp_mask);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000840 if (unlikely(data == NULL))
841 return -ENOMEM;
842
843 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
844 fp->rx_buf_size,
845 DMA_FROM_DEVICE);
846 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eric Dumazetd46d1322012-12-10 12:16:06 +0000847 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000848 BNX2X_ERR("Can't map rx data\n");
849 return -ENOMEM;
850 }
851
852 rx_buf->data = data;
853 dma_unmap_addr_set(rx_buf, mapping, mapping);
854
855 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
856 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
857
858 return 0;
859}
860
Barak Witkowski15192a82012-06-19 07:48:28 +0000861static
862void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
863 struct bnx2x_fastpath *fp,
864 struct bnx2x_eth_q_stats *qstats)
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000865{
Michal Schmidte4889212012-09-13 12:59:44 +0000866 /* Do nothing if no L4 csum validation was done.
867 * We do not check whether IP csum was validated. For IPv4 we assume
868 * that if the card got as far as validating the L4 csum, it also
869 * validated the IP csum. IPv6 has no IP csum.
870 */
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000871 if (cqe->fast_path_cqe.status_flags &
Michal Schmidte4889212012-09-13 12:59:44 +0000872 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000873 return;
874
Michal Schmidte4889212012-09-13 12:59:44 +0000875 /* If L4 validation was done, check if an error was found. */
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000876
877 if (cqe->fast_path_cqe.type_error_flags &
878 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
879 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
Barak Witkowski15192a82012-06-19 07:48:28 +0000880 qstats->hw_csum_err++;
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000881 else
882 skb->ip_summed = CHECKSUM_UNNECESSARY;
883}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000884
stephen hemmingera8f47eb2014-01-09 22:20:11 -0800885static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000886{
887 struct bnx2x *bp = fp->bp;
888 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300889 u16 sw_comp_cons, sw_comp_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000890 int rx_pkt = 0;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300891 union eth_rx_cqe *cqe;
892 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000893
894#ifdef BNX2X_STOP_ON_ERROR
895 if (unlikely(bp->panic))
896 return 0;
897#endif
Eric W. Biedermanb3529742014-03-14 17:57:59 -0700898 if (budget <= 0)
899 return rx_pkt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000900
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000901 bd_cons = fp->rx_bd_cons;
902 bd_prod = fp->rx_bd_prod;
903 bd_prod_fw = bd_prod;
904 sw_comp_cons = fp->rx_comp_cons;
905 sw_comp_prod = fp->rx_comp_prod;
906
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300907 comp_ring_cons = RCQ_BD(sw_comp_cons);
908 cqe = &fp->rx_comp_ring[comp_ring_cons];
909 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000910
911 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300912 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000913
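	/* Each completed CQE is dispatched below roughly as follows:
	 * slow-path events go to bnx2x_sp_event(), TPA start/stop CQEs go to
	 * the aggregation handlers, and regular fast-path CQEs are either
	 * copied into a freshly allocated skb (small packets when the MTU is
	 * above ETH_MAX_PACKET_SIZE) or wrapped with build_skb().
	 */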
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300914 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000915 struct sw_rx_bd *rx_buf = NULL;
916 struct sk_buff *skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000917 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300918 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000919 u16 len, pad, queue;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000920 u8 *data;
Tom Herbertbd5cef02013-12-17 23:23:11 -0800921 u32 rxhash;
Tom Herbert5495ab72013-12-19 08:59:08 -0800922 enum pkt_hash_types rxhash_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000923
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300924#ifdef BNX2X_STOP_ON_ERROR
925 if (unlikely(bp->panic))
926 return 0;
927#endif
928
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000929 bd_prod = RX_BD(bd_prod);
930 bd_cons = RX_BD(bd_cons);
931
wenxiong@linux.vnet.ibm.com9aaae042014-06-03 14:14:46 -0500932 /* A rmb() is required to ensure that the CQE is not read
933 * before it is written by the adapter DMA. PCI ordering
934 * rules will make sure the other fields are written before
935 * the marker at the end of struct eth_fast_path_rx_cqe
936 * but without rmb() a weakly ordered processor can process
937 * stale data. Without the barrier TPA state-machine might
938 * enter inconsistent state and kernel stack might be
 939 * provided with an incorrect packet description - these lead
 940 * to various kernel crashes.
941 */
942 rmb();
943
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300944 cqe_fp_flags = cqe_fp->type_error_flags;
945 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000946
Merav Sicron51c1a582012-03-18 10:33:38 +0000947 DP(NETIF_MSG_RX_STATUS,
948 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
949 CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300950 cqe_fp_flags, cqe_fp->status_flags,
951 le32_to_cpu(cqe_fp->rss_hash_result),
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000952 le16_to_cpu(cqe_fp->vlan_tag),
953 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000954
955 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300956 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000957 bnx2x_sp_event(fp, cqe);
958 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000959 }
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000960
Eric Dumazete52fcb22011-11-14 06:05:34 +0000961 rx_buf = &fp->rx_buf_ring[bd_cons];
962 data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000963
Eric Dumazete52fcb22011-11-14 06:05:34 +0000964 if (!CQE_TYPE_FAST(cqe_fp_type)) {
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000965 struct bnx2x_agg_info *tpa_info;
966 u16 frag_size, pages;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300967#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000968 /* sanity check */
Michal Schmidt7e6b4d42015-04-28 11:34:22 +0200969 if (fp->mode == TPA_MODE_DISABLED &&
Eric Dumazete52fcb22011-11-14 06:05:34 +0000970 (CQE_TYPE_START(cqe_fp_type) ||
971 CQE_TYPE_STOP(cqe_fp_type)))
Michal Schmidt7e6b4d42015-04-28 11:34:22 +0200972 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
Eric Dumazete52fcb22011-11-14 06:05:34 +0000973 CQE_TYPE(cqe_fp_type));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300974#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000975
Eric Dumazete52fcb22011-11-14 06:05:34 +0000976 if (CQE_TYPE_START(cqe_fp_type)) {
977 u16 queue = cqe_fp->queue_index;
978 DP(NETIF_MSG_RX_STATUS,
979 "calling tpa_start on queue %d\n",
980 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000981
Eric Dumazete52fcb22011-11-14 06:05:34 +0000982 bnx2x_tpa_start(fp, queue,
983 bd_cons, bd_prod,
984 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000985
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000986 goto next_rx;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000987 }
988 queue = cqe->end_agg_cqe.queue_index;
989 tpa_info = &fp->tpa_info[queue];
990 DP(NETIF_MSG_RX_STATUS,
991 "calling tpa_stop on queue %d\n",
992 queue);
993
994 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
995 tpa_info->len_on_bd;
996
997 if (fp->mode == TPA_MODE_GRO)
998 pages = (frag_size + tpa_info->full_page - 1) /
999 tpa_info->full_page;
1000 else
1001 pages = SGE_PAGE_ALIGN(frag_size) >>
1002 SGE_PAGE_SHIFT;
1003
1004 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1005 &cqe->end_agg_cqe, comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001006#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00001007 if (bp->panic)
1008 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001009#endif
1010
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00001011 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1012 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001013 }
1014 /* non TPA */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00001015 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001016 pad = cqe_fp->placement_offset;
1017 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001018 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +00001019 pad + RX_COPY_THRESH,
1020 DMA_FROM_DEVICE);
1021 pad += NET_SKB_PAD;
1022 prefetch(data + pad); /* speedup eth_type_trans() */
1023 /* is this an error packet? */
1024 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001025 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001026 "ERROR flags %x rx packet %u\n",
1027 cqe_fp_flags, sw_comp_cons);
Barak Witkowski15192a82012-06-19 07:48:28 +00001028 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001029 goto reuse_rx;
1030 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001031
Eric Dumazete52fcb22011-11-14 06:05:34 +00001032 /* Since we don't have a jumbo ring
1033 * copy small packets if mtu > 1500
1034 */
1035 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1036 (len <= RX_COPY_THRESH)) {
Alexander Duyck45abfb12014-12-09 19:41:17 -08001037 skb = napi_alloc_skb(&fp->napi, len);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001038 if (skb == NULL) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001039 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001040 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +00001041 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001042 goto reuse_rx;
1043 }
Eric Dumazete52fcb22011-11-14 06:05:34 +00001044 memcpy(skb->data, data + pad, len);
1045 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1046 } else {
Michal Schmidt996dedb2013-09-05 22:13:09 +02001047 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1048 GFP_ATOMIC) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001049 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001050 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001051 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001052 DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001053 skb = build_skb(data, fp->rx_frag_size);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001054 if (unlikely(!skb)) {
Eric Dumazetd46d1322012-12-10 12:16:06 +00001055 bnx2x_frag_free(fp, data);
Barak Witkowski15192a82012-06-19 07:48:28 +00001056 bnx2x_fp_qstats(bp, fp)->
1057 rx_skb_alloc_failed++;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001058 goto next_rx;
1059 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001060 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001061 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +00001062 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1063 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +00001064 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001065reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +00001066 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001067 goto next_rx;
1068 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001069 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001070
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001071 skb_put(skb, len);
1072 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001073
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001074 /* Set Toeplitz hash for a non-LRO skb */
Tom Herbert5495ab72013-12-19 08:59:08 -08001075 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1076 skb_set_hash(skb, rxhash, rxhash_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001077
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001078 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001079
Eric Dumazetd6cb3e42012-06-12 23:50:04 +00001080 if (bp->dev->features & NETIF_F_RXCSUM)
Barak Witkowski15192a82012-06-19 07:48:28 +00001081 bnx2x_csum_validate(skb, cqe, fp,
1082 bnx2x_fp_qstats(bp, fp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001083
Dmitry Kravkovf233caf2011-11-13 04:34:22 +00001084 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001085
Michal Kalderoneeed0182014-08-17 16:47:44 +03001086 /* Check if this packet was timestamped */
Yuval Mintz56daf662014-08-28 08:07:32 +03001087 if (unlikely(cqe->fast_path_cqe.type_error_flags &
Michal Kalderoneeed0182014-08-17 16:47:44 +03001088 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1089 bnx2x_set_rx_ts(bp, skb);
1090
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001091 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1092 PARSING_FLAGS_VLAN)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001093 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001094 le16_to_cpu(cqe_fp->vlan_tag));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001095
Eric Dumazetb59768c2015-11-18 06:30:57 -08001096 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001097next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +00001098 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001099
1100 bd_cons = NEXT_RX_IDX(bd_cons);
1101 bd_prod = NEXT_RX_IDX(bd_prod);
1102 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1103 rx_pkt++;
1104next_cqe:
1105 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1106 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1107
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001108 /* mark CQE as free */
1109 BNX2X_SEED_CQE(cqe_fp);
1110
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001111 if (rx_pkt == budget)
1112 break;
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001113
1114 comp_ring_cons = RCQ_BD(sw_comp_cons);
1115 cqe = &fp->rx_comp_ring[comp_ring_cons];
1116 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001117 } /* while */
1118
1119 fp->rx_bd_cons = bd_cons;
1120 fp->rx_bd_prod = bd_prod_fw;
1121 fp->rx_comp_cons = sw_comp_cons;
1122 fp->rx_comp_prod = sw_comp_prod;
1123
1124 /* Update producers */
1125 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1126 fp->rx_sge_prod);
1127
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001128 return rx_pkt;
1129}
1130
1131static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1132{
1133 struct bnx2x_fastpath *fp = fp_cookie;
1134 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001135 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001136
Merav Sicron51c1a582012-03-18 10:33:38 +00001137 DP(NETIF_MSG_INTR,
1138 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001139 fp->index, fp->fw_sb_id, fp->igu_sb_id);
Yuval Mintzecf01c22013-04-22 02:53:03 +00001140
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001141 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001142
1143#ifdef BNX2X_STOP_ON_ERROR
1144 if (unlikely(bp->panic))
1145 return IRQ_HANDLED;
1146#endif
1147
1148 /* Handle Rx and Tx according to MSI-X vector */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001149 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00001150 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001151
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001152 prefetch(&fp->sb_running_index[SM_RX_ID]);
Eric Dumazetf5fbf112014-10-29 17:07:50 -07001153 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001154
1155 return IRQ_HANDLED;
1156}
1157
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001158/* HW Lock for shared dual port PHYs */
1159void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1160{
1161 mutex_lock(&bp->port.phy_mutex);
1162
Yaniv Rosner8203c4b2012-11-27 03:46:33 +00001163 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001164}
1165
1166void bnx2x_release_phy_lock(struct bnx2x *bp)
1167{
Yaniv Rosner8203c4b2012-11-27 03:46:33 +00001168 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001169
1170 mutex_unlock(&bp->port.phy_mutex);
1171}
1172
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001173/* calculates MF speed according to current linespeed and MF configuration */
1174u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1175{
1176 u16 line_speed = bp->link_vars.line_speed;
1177 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001178 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1179 bp->mf_config[BP_VN(bp)]);
1180
1181 /* Calculate the current MAX line speed limit for the MF
1182 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001183 */
Yuval Mintzda3cc2d2015-08-17 08:28:25 +03001184 if (IS_MF_PERCENT_BW(bp))
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001185 line_speed = (line_speed * maxCfg) / 100;
1186 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001187 u16 vn_max_rate = maxCfg * 100;
1188
1189 if (vn_max_rate < line_speed)
1190 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001191 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001192 }
1193
1194 return line_speed;
1195}
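/* Worked example (illustrative values only): with line_speed = 10000 Mbps
 * and maxCfg = 25, percent-BW MF mode yields 10000 * 25 / 100 = 2500 Mbps,
 * while SD mode treats maxCfg as units of 100 Mbps, i.e. vn_max_rate =
 * 25 * 100 = 2500 Mbps, and the reported speed is
 * min(line_speed, vn_max_rate).
 */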
1196
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001197/**
1198 * bnx2x_fill_report_data - fill link report data to report
1199 *
1200 * @bp: driver handle
1201 * @data: link state to update
1202 *
 1203 * It uses non-atomic bit operations because it is called under the mutex.
1204 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001205static void bnx2x_fill_report_data(struct bnx2x *bp,
1206 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001207{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001208 memset(data, 0, sizeof(*data));
1209
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001210 if (IS_PF(bp)) {
1211 /* Fill the report data: effective line speed */
1212 data->line_speed = bnx2x_get_mf_speed(bp);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001213
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001214 /* Link is down */
1215 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1216 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1217 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001218
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001219 if (!BNX2X_NUM_ETH_QUEUES(bp))
1220 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1221 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001222
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001223 /* Full DUPLEX */
1224 if (bp->link_vars.duplex == DUPLEX_FULL)
1225 __set_bit(BNX2X_LINK_REPORT_FD,
1226 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001227
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001228 /* Rx Flow Control is ON */
1229 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1230 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1231 &data->link_report_flags);
1232
1233 /* Tx Flow Control is ON */
1234 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1235 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1236 &data->link_report_flags);
1237 } else { /* VF */
1238 *data = bp->vf_link_vars;
1239 }
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001240}
1241
1242/**
1243 * bnx2x_link_report - report link status to OS.
1244 *
1245 * @bp: driver handle
1246 *
1247 * Calls the __bnx2x_link_report() under the same locking scheme
 1248 * as the link/PHY state managing code to ensure consistent link
1249 * reporting.
1250 */
1251
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001252void bnx2x_link_report(struct bnx2x *bp)
1253{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001254 bnx2x_acquire_phy_lock(bp);
1255 __bnx2x_link_report(bp);
1256 bnx2x_release_phy_lock(bp);
1257}
1258
1259/**
1260 * __bnx2x_link_report - report link status to OS.
1261 *
1262 * @bp: driver handle
1263 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001264 * Non-atomic implementation.
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001265 * Should be called under the phy_lock.
1266 */
1267void __bnx2x_link_report(struct bnx2x *bp)
1268{
1269 struct bnx2x_link_report_data cur_data;
1270
Sudarsana Reddy Kalluru21fe14f2018-06-28 04:52:15 -07001271 if (bp->force_link_down) {
1272 bp->link_vars.link_up = 0;
1273 return;
1274 }
1275
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001276 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001277 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001278 bnx2x_read_mf_cfg(bp);
1279
1280 /* Read the current link report info */
1281 bnx2x_fill_report_data(bp, &cur_data);
1282
1283 /* Don't report link down or exactly the same link status twice */
1284 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1285 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1286 &bp->last_reported_link.link_report_flags) &&
1287 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1288 &cur_data.link_report_flags)))
1289 return;
1290
1291 bp->link_cnt++;
1292
 1293 /* We are going to report new link parameters now -
 1294 * remember the current data for next time.
1295 */
1296 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1297
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001298 /* propagate status to VFs */
1299 if (IS_PF(bp))
1300 bnx2x_iov_link_update(bp);
1301
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001302 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1303 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001304 netif_carrier_off(bp->dev);
1305 netdev_err(bp->dev, "NIC Link is Down\n");
1306 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001307 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001308 const char *duplex;
1309 const char *flow;
1310
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001311 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001312
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001313 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1314 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001315 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001316 else
Joe Perches94f05b02011-08-14 12:16:20 +00001317 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001318
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001319 /* Handle the FC at the end so that only these flags can
 1320 * possibly be set. This way we can easily check whether any FC
 1321 * is enabled at all.
1322 */
1323 if (cur_data.link_report_flags) {
1324 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1325 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001326 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1327 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001328 flow = "ON - receive & transmit";
1329 else
1330 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001331 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001332 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001333 }
Joe Perches94f05b02011-08-14 12:16:20 +00001334 } else {
1335 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001336 }
Joe Perches94f05b02011-08-14 12:16:20 +00001337 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1338 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001339 }
1340}
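/* Example of the resulting message (values are illustrative; the duplex and
 * flow strings come from the selection above):
 *
 *	NIC Link is Up, 10000 Mbps full duplex, Flow control: ON - receive & transmit
 */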
1341
Eric Dumazet1191cb82012-04-27 21:39:21 +00001342static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1343{
1344 int i;
1345
1346 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1347 struct eth_rx_sge *sge;
1348
1349 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1350 sge->addr_hi =
1351 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1352 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1353
1354 sge->addr_lo =
1355 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1356 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1357 }
1358}
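/* Layout sketch (derived from the loop above): near the end of each SGE
 * page (index RX_SGE_CNT * i - 2) a "next page" pointer is written whose
 * target is rx_sge_mapping + BCM_PAGE_SIZE * (i % NUM_RX_SGE_PAGES), so
 * every page links to the following one and the final page wraps back to
 * page 0, forming a circular chain of SGE pages.
 */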
1359
1360static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1361 struct bnx2x_fastpath *fp, int last)
1362{
1363 int i;
1364
1365 for (i = 0; i < last; i++) {
1366 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1367 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1368 u8 *data = first_buf->data;
1369
1370 if (data == NULL) {
1371 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1372 continue;
1373 }
1374 if (tpa_info->tpa_state == BNX2X_TPA_START)
1375 dma_unmap_single(&bp->pdev->dev,
1376 dma_unmap_addr(first_buf, mapping),
1377 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001378 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001379 first_buf->data = NULL;
1380 }
1381}
1382
Merav Sicron55c11942012-11-07 00:45:48 +00001383void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1384{
1385 int j;
1386
1387 for_each_rx_queue_cnic(bp, j) {
1388 struct bnx2x_fastpath *fp = &bp->fp[j];
1389
1390 fp->rx_bd_cons = 0;
1391
1392 /* Activate BD ring */
1393 /* Warning!
 1394 * this will generate an interrupt (to the TSTORM) and
 1395 * must only be done after the chip is initialized
1396 */
1397 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1398 fp->rx_sge_prod);
1399 }
1400}
1401
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001402void bnx2x_init_rx_rings(struct bnx2x *bp)
1403{
1404 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001405 u16 ring_prod;
1406 int i, j;
1407
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001408 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001409 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001410 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001411
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001412 DP(NETIF_MSG_IFUP,
1413 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1414
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001415 if (fp->mode != TPA_MODE_DISABLED) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001416 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001417 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001418 struct bnx2x_agg_info *tpa_info =
1419 &fp->tpa_info[i];
1420 struct sw_rx_bd *first_buf =
1421 &tpa_info->first_buf;
1422
Michal Schmidt996dedb2013-09-05 22:13:09 +02001423 first_buf->data =
1424 bnx2x_frag_alloc(fp, GFP_KERNEL);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001425 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001426 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1427 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001428 bnx2x_free_tpa_pool(bp, fp, i);
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001429 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001430 break;
1431 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001432 dma_unmap_addr_set(first_buf, mapping, 0);
1433 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001434 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001435
1436 /* "next page" elements initialization */
1437 bnx2x_set_next_page_sgl(fp);
1438
1439 /* set SGEs bit mask */
1440 bnx2x_init_sge_ring_bit_mask(fp);
1441
1442 /* Allocate SGEs and initialize the ring elements */
1443 for (i = 0, ring_prod = 0;
1444 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1445
Michal Schmidt996dedb2013-09-05 22:13:09 +02001446 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1447 GFP_KERNEL) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001448 BNX2X_ERR("was only able to allocate %d rx sges\n",
1449 i);
1450 BNX2X_ERR("disabling TPA for queue[%d]\n",
1451 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001452 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001453 bnx2x_free_rx_sge_range(bp, fp,
1454 ring_prod);
1455 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001456 MAX_AGG_QS(bp));
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001457 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001458 ring_prod = 0;
1459 break;
1460 }
1461 ring_prod = NEXT_SGE_IDX(ring_prod);
1462 }
1463
1464 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001465 }
1466 }
1467
Merav Sicron55c11942012-11-07 00:45:48 +00001468 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001469 struct bnx2x_fastpath *fp = &bp->fp[j];
1470
1471 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001472
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001473 /* Activate BD ring */
1474 /* Warning!
 1475 * this will generate an interrupt (to the TSTORM) and
 1476 * must only be done after the chip is initialized
1477 */
1478 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1479 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001480
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001481 if (j != 0)
1482 continue;
1483
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001484 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001485 REG_WR(bp, BAR_USTRORM_INTMEM +
1486 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1487 U64_LO(fp->rx_comp_mapping));
1488 REG_WR(bp, BAR_USTRORM_INTMEM +
1489 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1490 U64_HI(fp->rx_comp_mapping));
1491 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001492 }
1493}
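/* Failure behaviour sketch (as coded above): if a TPA first-buffer or an
 * SGE allocation fails for queue j, the partially filled pool/ring is
 * freed and fp->mode is downgraded to TPA_MODE_DISABLED for that queue
 * only; the remaining queues keep their aggregation setting.
 */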
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001494
Merav Sicron55c11942012-11-07 00:45:48 +00001495static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1496{
1497 u8 cos;
1498 struct bnx2x *bp = fp->bp;
1499
1500 for_each_cos_in_tx_queue(fp, cos) {
1501 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1502 unsigned pkts_compl = 0, bytes_compl = 0;
1503
1504 u16 sw_prod = txdata->tx_pkt_prod;
1505 u16 sw_cons = txdata->tx_pkt_cons;
1506
1507 while (sw_cons != sw_prod) {
1508 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1509 &pkts_compl, &bytes_compl);
1510 sw_cons++;
1511 }
1512
1513 netdev_tx_reset_queue(
1514 netdev_get_tx_queue(bp->dev,
1515 txdata->txq_index));
1516 }
1517}
1518
1519static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1520{
1521 int i;
1522
1523 for_each_tx_queue_cnic(bp, i) {
1524 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1525 }
1526}
1527
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001528static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1529{
1530 int i;
1531
Merav Sicron55c11942012-11-07 00:45:48 +00001532 for_each_eth_queue(bp, i) {
1533 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001534 }
1535}
1536
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001537static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1538{
1539 struct bnx2x *bp = fp->bp;
1540 int i;
1541
1542 /* ring wasn't allocated */
1543 if (fp->rx_buf_ring == NULL)
1544 return;
1545
1546 for (i = 0; i < NUM_RX_BD; i++) {
1547 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001548 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001549
Eric Dumazete52fcb22011-11-14 06:05:34 +00001550 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001551 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001552 dma_unmap_single(&bp->pdev->dev,
1553 dma_unmap_addr(rx_buf, mapping),
1554 fp->rx_buf_size, DMA_FROM_DEVICE);
1555
Eric Dumazete52fcb22011-11-14 06:05:34 +00001556 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001557 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001558 }
1559}
1560
Merav Sicron55c11942012-11-07 00:45:48 +00001561static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1562{
1563 int j;
1564
1565 for_each_rx_queue_cnic(bp, j) {
1566 bnx2x_free_rx_bds(&bp->fp[j]);
1567 }
1568}
1569
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001570static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1571{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001572 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001573
Merav Sicron55c11942012-11-07 00:45:48 +00001574 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001575 struct bnx2x_fastpath *fp = &bp->fp[j];
1576
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001577 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001578
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001579 if (fp->mode != TPA_MODE_DISABLED)
David S. Miller8decf862011-09-22 03:23:13 -04001580 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001581 }
1582}
1583
stephen hemmingera8f47eb2014-01-09 22:20:11 -08001584static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00001585{
1586 bnx2x_free_tx_skbs_cnic(bp);
1587 bnx2x_free_rx_skbs_cnic(bp);
1588}
1589
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001590void bnx2x_free_skbs(struct bnx2x *bp)
1591{
1592 bnx2x_free_tx_skbs(bp);
1593 bnx2x_free_rx_skbs(bp);
1594}
1595
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001596void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1597{
1598 /* load old values */
1599 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1600
1601 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1602 /* leave all but MAX value */
1603 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1604
1605 /* set new MAX value */
1606 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1607 & FUNC_MF_CFG_MAX_BW_MASK;
1608
1609 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1610 }
1611}
1612
Dmitry Kravkovca924292011-06-14 01:33:08 +00001613/**
1614 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1615 *
1616 * @bp: driver handle
1617 * @nvecs: number of vectors to be released
1618 */
1619static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001620{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001621 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001622
Dmitry Kravkovca924292011-06-14 01:33:08 +00001623 if (nvecs == offset)
1624 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001625
1626 /* VFs don't have a default SB */
1627 if (IS_PF(bp)) {
1628 free_irq(bp->msix_table[offset].vector, bp->dev);
1629 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1630 bp->msix_table[offset].vector);
1631 offset++;
1632 }
Merav Sicron55c11942012-11-07 00:45:48 +00001633
1634 if (CNIC_SUPPORT(bp)) {
1635 if (nvecs == offset)
1636 return;
1637 offset++;
1638 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001639
Dmitry Kravkovca924292011-06-14 01:33:08 +00001640 for_each_eth_queue(bp, i) {
1641 if (nvecs == offset)
1642 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001643 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1644 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001645
1646 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001647 }
1648}
1649
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001650void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001651{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001652 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001653 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1654 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1655
1656 /* vfs don't have a default status block */
1657 if (IS_PF(bp))
1658 nvecs++;
1659
1660 bnx2x_free_msix_irqs(bp, nvecs);
1661 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001662 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001663 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001664}
1665
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001666int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001667{
Ariel Elior1ab44342013-01-01 05:22:23 +00001668 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001669
Ariel Elior1ab44342013-01-01 05:22:23 +00001670 /* VFs don't have a default status block */
1671 if (IS_PF(bp)) {
1672 bp->msix_table[msix_vec].entry = msix_vec;
1673 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1674 bp->msix_table[0].entry);
1675 msix_vec++;
1676 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001677
Merav Sicron55c11942012-11-07 00:45:48 +00001678 /* Cnic requires an msix vector for itself */
1679 if (CNIC_SUPPORT(bp)) {
1680 bp->msix_table[msix_vec].entry = msix_vec;
1681 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1682 msix_vec, bp->msix_table[msix_vec].entry);
1683 msix_vec++;
1684 }
1685
Ariel Elior6383c0b2011-07-14 08:31:57 +00001686 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001687 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001688 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001689 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1690 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001691 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001692 }
1693
Ariel Elior1ab44342013-01-01 05:22:23 +00001694 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1695 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001696
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001697 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1698 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001699 /*
1700 * reconfigure number of tx/rx queues according to available
1701 * MSI-X vectors
1702 */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001703 if (rc == -ENOSPC) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001704 /* Get by with single vector */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001705 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1706 if (rc < 0) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001707 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1708 rc);
1709 goto no_msix;
1710 }
1711
1712 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1713 bp->flags |= USING_SINGLE_MSIX_FLAG;
1714
Merav Sicron55c11942012-11-07 00:45:48 +00001715 BNX2X_DEV_INFO("set number of queues to 1\n");
1716 bp->num_ethernet_queues = 1;
1717 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001718 } else if (rc < 0) {
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001719 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001720 goto no_msix;
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001721 } else if (rc < msix_vec) {
 1722 /* how many fewer vectors will we have? */
1723 int diff = msix_vec - rc;
1724
1725 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1726
1727 /*
1728 * decrease number of queues by number of unallocated entries
1729 */
1730 bp->num_ethernet_queues -= diff;
1731 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1732
1733 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1734 bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001735 }
1736
1737 bp->flags |= USING_MSIX_FLAG;
1738
1739 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001740
1741no_msix:
 1742 /* fall back to INTx if not enough memory */
1743 if (rc == -ENOMEM)
1744 bp->flags |= DISABLE_MSI_FLAG;
1745
1746 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001747}
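/* Example vector layout requested above, assuming a PF with CNIC support
 * and 4 ETH queues (illustrative numbers):
 *
 *	msix_table[0]    - slowpath / default status block
 *	msix_table[1]    - CNIC
 *	msix_table[2..5] - ETH fastpath queues 0..3
 *
 * If pci_enable_msix_range() grants fewer vectors, the code either trims
 * bp->num_ethernet_queues accordingly, falls back to a single MSI-X vector
 * (USING_SINGLE_MSIX_FLAG), or returns an error so the caller may fall
 * back to MSI/INTx.
 */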
1748
1749static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1750{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001751 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001752
Ariel Eliorad5afc82013-01-01 05:22:26 +00001753 /* no default status block for vf */
1754 if (IS_PF(bp)) {
1755 rc = request_irq(bp->msix_table[offset++].vector,
1756 bnx2x_msix_sp_int, 0,
1757 bp->dev->name, bp->dev);
1758 if (rc) {
1759 BNX2X_ERR("request sp irq failed\n");
1760 return -EBUSY;
1761 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001762 }
1763
Merav Sicron55c11942012-11-07 00:45:48 +00001764 if (CNIC_SUPPORT(bp))
1765 offset++;
1766
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001767 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001768 struct bnx2x_fastpath *fp = &bp->fp[i];
1769 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1770 bp->dev->name, i);
1771
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001772 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001773 bnx2x_msix_fp_int, 0, fp->name, fp);
1774 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001775 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1776 bp->msix_table[offset].vector, rc);
1777 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001778 return -EBUSY;
1779 }
1780
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001781 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001782 }
1783
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001784 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001785 if (IS_PF(bp)) {
1786 offset = 1 + CNIC_SUPPORT(bp);
1787 netdev_info(bp->dev,
1788 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1789 bp->msix_table[0].vector,
1790 0, bp->msix_table[offset].vector,
1791 i - 1, bp->msix_table[offset + i - 1].vector);
1792 } else {
1793 offset = CNIC_SUPPORT(bp);
1794 netdev_info(bp->dev,
1795 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1796 0, bp->msix_table[offset].vector,
1797 i - 1, bp->msix_table[offset + i - 1].vector);
1798 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001799 return 0;
1800}
1801
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001802int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001803{
1804 int rc;
1805
1806 rc = pci_enable_msi(bp->pdev);
1807 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001808 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001809 return -1;
1810 }
1811 bp->flags |= USING_MSI_FLAG;
1812
1813 return 0;
1814}
1815
1816static int bnx2x_req_irq(struct bnx2x *bp)
1817{
1818 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001819 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001820
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001821 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001822 flags = 0;
1823 else
1824 flags = IRQF_SHARED;
1825
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001826 if (bp->flags & USING_MSIX_FLAG)
1827 irq = bp->msix_table[0].vector;
1828 else
1829 irq = bp->pdev->irq;
1830
1831 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001832}
1833
Yuval Mintzc957d092013-06-25 08:50:11 +03001834static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001835{
1836 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001837 if (bp->flags & USING_MSIX_FLAG &&
1838 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001839 rc = bnx2x_req_msix_irqs(bp);
1840 if (rc)
1841 return rc;
1842 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001843 rc = bnx2x_req_irq(bp);
1844 if (rc) {
1845 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1846 return rc;
1847 }
1848 if (bp->flags & USING_MSI_FLAG) {
1849 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001850 netdev_info(bp->dev, "using MSI IRQ %d\n",
1851 bp->dev->irq);
1852 }
1853 if (bp->flags & USING_MSIX_FLAG) {
1854 bp->dev->irq = bp->msix_table[0].vector;
1855 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1856 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001857 }
1858 }
1859
1860 return 0;
1861}
1862
Merav Sicron55c11942012-11-07 00:45:48 +00001863static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1864{
1865 int i;
1866
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001867 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001868 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001869 }
Merav Sicron55c11942012-11-07 00:45:48 +00001870}
1871
Eric Dumazet1191cb82012-04-27 21:39:21 +00001872static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001873{
1874 int i;
1875
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001876 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001877 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001878 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001879}
1880
Merav Sicron55c11942012-11-07 00:45:48 +00001881static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1882{
1883 int i;
1884
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001885 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001886 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001887 }
Merav Sicron55c11942012-11-07 00:45:48 +00001888}
1889
Eric Dumazet1191cb82012-04-27 21:39:21 +00001890static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001891{
1892 int i;
1893
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001894 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001895 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001896 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001897}
1898
1899void bnx2x_netif_start(struct bnx2x *bp)
1900{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001901 if (netif_running(bp->dev)) {
1902 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001903 if (CNIC_LOADED(bp))
1904 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001905 bnx2x_int_enable(bp);
1906 if (bp->state == BNX2X_STATE_OPEN)
1907 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001908 }
1909}
1910
1911void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1912{
1913 bnx2x_int_disable_sync(bp, disable_hw);
1914 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001915 if (CNIC_LOADED(bp))
1916 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001917}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001918
Jason Wangf663dd92014-01-10 16:18:26 +08001919u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001920 void *accel_priv, select_queue_fallback_t fallback)
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001921{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001922 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001923
Merav Sicron55c11942012-11-07 00:45:48 +00001924 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001925 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1926 u16 ether_type = ntohs(hdr->h_proto);
1927
1928 /* Skip VLAN tag if present */
1929 if (ether_type == ETH_P_8021Q) {
1930 struct vlan_ethhdr *vhdr =
1931 (struct vlan_ethhdr *)skb->data;
1932
1933 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1934 }
1935
1936 /* If ethertype is FCoE or FIP - use FCoE ring */
1937 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001938 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001939 }
Merav Sicron55c11942012-11-07 00:45:48 +00001940
David S. Miller823dcd22011-08-20 10:39:12 -07001941 /* select a non-FCoE queue */
Mintz, Yuval96d14522017-06-01 15:57:56 +03001942 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001943}
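/* Queue selection sketch (as implemented above): FCoE and FIP frames are
 * steered to the dedicated FCoE Tx ring via bnx2x_fcoe_tx(); everything
 * else is hashed by the stack's fallback into the range
 * [0, BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos), i.e. only real ETH L2
 * rings carry regular traffic.
 */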
1944
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001945void bnx2x_set_num_queues(struct bnx2x *bp)
1946{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001947 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001948 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001949
Barak Witkowskia3348722012-04-23 03:04:46 +00001950 /* override in STORAGE SD modes */
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03001951 if (IS_MF_STORAGE_ONLY(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001952 bp->num_ethernet_queues = 1;
1953
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001954 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001955 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1956 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001957
1958 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001959}
1960
David S. Miller823dcd22011-08-20 10:39:12 -07001961/**
1962 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1963 *
1964 * @bp: Driver handle
1965 *
 1966 * We currently support at most 16 Tx queues for each CoS, thus we will
1967 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1968 * bp->max_cos.
1969 *
1970 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1971 * index after all ETH L2 indices.
1972 *
1973 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1974 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001975 * 16..31, ...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001976 *
1977 * The proper configuration of skb->queue_mapping is handled by
1978 * bnx2x_select_queue() and __skb_tx_hash().
1979 *
1980 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1981 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1982 */
Merav Sicron55c11942012-11-07 00:45:48 +00001983static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001984{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001985 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001986
Merav Sicron65565882012-06-19 07:48:26 +00001987 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001988 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001989
1990/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001991 if (include_cnic && !NO_FCOE(bp)) {
1992 rx++;
1993 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001994 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001995
1996 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1997 if (rc) {
1998 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1999 return rc;
2000 }
2001 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2002 if (rc) {
2003 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2004 return rc;
2005 }
2006
Merav Sicron51c1a582012-03-18 10:33:38 +00002007 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002008 tx, rx);
2009
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002010 return rc;
2011}
2012
Eric Dumazet1191cb82012-04-27 21:39:21 +00002013static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002014{
2015 int i;
2016
2017 for_each_queue(bp, i) {
2018 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00002019 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002020
2021 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2022 if (IS_FCOE_IDX(i))
2023 /*
 2024 * Although no IP frames are expected to arrive on
 2025 * this ring, we still want to add an
2026 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2027 * overrun attack.
2028 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00002029 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002030 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00002031 mtu = bp->dev->mtu;
2032 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2033 IP_HEADER_ALIGNMENT_PADDING +
2034 ETH_OVREHEAD +
2035 mtu +
2036 BNX2X_FW_RX_ALIGN_END;
Scott Wood909dbba2017-04-28 19:17:41 -05002037 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002038 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00002039 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2040 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2041 else
2042 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002043 }
2044}
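/* Worked example (kept symbolic, since the constants are board/arch
 * specific): for a regular queue with MTU 1500,
 *
 *	rx_buf_size = SKB_DATA_ALIGN(BNX2X_FW_RX_ALIGN_START +
 *				     IP_HEADER_ALIGNMENT_PADDING +
 *				     ETH_OVREHEAD + 1500 +
 *				     BNX2X_FW_RX_ALIGN_END);
 *
 * and the page-frag allocator is used only while rx_buf_size + NET_SKB_PAD
 * still fits in a single page; otherwise rx_frag_size stays 0 and
 * bnx2x_frag_alloc() presumably falls back to kmalloc'ed buffers.
 */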
2045
Ariel Elior60cad4e2013-09-04 14:09:22 +03002046static int bnx2x_init_rss(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002047{
2048 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002049 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2050
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002051 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002052 * enabled
2053 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002054 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2055 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00002056 bp->fp->cl_id +
2057 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002058
2059 /*
2060 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2061 * per-port, so if explicit configuration is needed, do it only
2062 * for a PMF.
2063 *
2064 * For 57712 and newer on the other hand it's a per-function
2065 * configuration.
2066 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002067 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002068}
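/* Example of the initial indirection table, assuming cl_id = 0 and
 * num_eth_queues = 4 (ethtool_rxfh_indir_default(i, n) is simply i % n):
 *
 *	ind_table[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 *
 * i.e. RSS hash buckets are spread round-robin over the ETH client ids.
 */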
2069
Ariel Elior60cad4e2013-09-04 14:09:22 +03002070int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2071 bool config_hash, bool enable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002072{
Yuval Mintz3b603062012-03-18 10:33:39 +00002073 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002074
2075 /* Although RSS is meaningless when there is a single HW queue we
2076 * still need it enabled in order to have HW Rx hash generated.
2077 *
2078 * if (!is_eth_multi(bp))
2079 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2080 */
2081
Dmitry Kravkov96305232012-04-03 18:41:30 +00002082 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002083
2084 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2085
Ariel Elior60cad4e2013-09-04 14:09:22 +03002086 if (enable) {
2087 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002088
Ariel Elior60cad4e2013-09-04 14:09:22 +03002089 /* RSS configuration */
2090 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2091 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2092 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2093 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2094 if (rss_obj->udp_rss_v4)
2095 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2096 if (rss_obj->udp_rss_v6)
2097 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Dmitry Kravkove42780b2014-08-17 16:47:43 +03002098
Yuval Mintz28311f82015-07-22 09:16:22 +03002099 if (!CHIP_IS_E1x(bp)) {
2100 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2101 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2102 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2103
Dmitry Kravkove42780b2014-08-17 16:47:43 +03002104 /* valid only for TUNN_MODE_GRE tunnel mode */
Yuval Mintz28311f82015-07-22 09:16:22 +03002105 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2106 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002107 } else {
2108 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2109 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002110
Dmitry Kravkov96305232012-04-03 18:41:30 +00002111 /* Hash bits */
2112 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002113
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002114 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002115
Dmitry Kravkov96305232012-04-03 18:41:30 +00002116 if (config_hash) {
2117 /* RSS keys */
Eric Dumazete3ec69c2014-11-16 06:23:07 -08002118 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
Dmitry Kravkov96305232012-04-03 18:41:30 +00002119 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002120 }
2121
Ariel Elior60cad4e2013-09-04 14:09:22 +03002122 if (IS_PF(bp))
2123 return bnx2x_config_rss(bp, &params);
2124 else
2125 return bnx2x_vfpf_config_rss(bp, &params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002126}
2127
Eric Dumazet1191cb82012-04-27 21:39:21 +00002128static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002129{
Yuval Mintz3b603062012-03-18 10:33:39 +00002130 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002131
2132 /* Prepare parameters for function state transitions */
2133 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2134
2135 func_params.f_obj = &bp->func_obj;
2136 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2137
2138 func_params.params.hw_init.load_phase = load_code;
2139
2140 return bnx2x_func_state_change(bp, &func_params);
2141}
2142
2143/*
 2144 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002145 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002146 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002147void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002148{
2149 int rc;
2150 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002151 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002152 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002153
2154 /***************** Cleanup MACs' object first *************************/
2155
 2156 /* Wait for completion of the requested operations */
2157 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2158 /* Perform a dry cleanup */
2159 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2160
2161 /* Clean ETH primary MAC */
2162 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002163 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002164 &ramrod_flags);
2165 if (rc != 0)
2166 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2167
2168 /* Cleanup UC list */
2169 vlan_mac_flags = 0;
2170 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2171 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2172 &ramrod_flags);
2173 if (rc != 0)
2174 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2175
2176 /***************** Now clean mcast object *****************************/
2177 rparam.mcast_obj = &bp->mcast_obj;
2178 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2179
Yuval Mintz8b09be52013-08-01 17:30:59 +03002180 /* Add a DEL command... - Since we're doing a driver cleanup only,
2181 * we take a lock surrounding both the initial send and the CONTs,
2182 * as we don't want a true completion to disrupt us in the middle.
2183 */
2184 netif_addr_lock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002185 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2186 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002187 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2188 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002189
2190 /* ...and wait until all pending commands are cleared */
2191 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2192 while (rc != 0) {
2193 if (rc < 0) {
2194 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2195 rc);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002196 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002197 return;
2198 }
2199
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2201 }
Yuval Mintz8b09be52013-08-01 17:30:59 +03002202 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002203}
2204
2205#ifndef BNX2X_STOP_ON_ERROR
2206#define LOAD_ERROR_EXIT(bp, label) \
2207 do { \
2208 (bp)->state = BNX2X_STATE_ERROR; \
2209 goto label; \
2210 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002211
2212#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2213 do { \
2214 bp->cnic_loaded = false; \
2215 goto label; \
2216 } while (0)
2217#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002218#define LOAD_ERROR_EXIT(bp, label) \
2219 do { \
2220 (bp)->state = BNX2X_STATE_ERROR; \
2221 (bp)->panic = 1; \
2222 return -EBUSY; \
2223 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002224#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2225 do { \
2226 bp->cnic_loaded = false; \
2227 (bp)->panic = 1; \
2228 return -EBUSY; \
2229 } while (0)
2230#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002231
Ariel Eliorad5afc82013-01-01 05:22:26 +00002232static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002233{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002234 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2235 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2236 return;
2237}
Yuval Mintz452427b2012-03-26 20:47:07 +00002238
Ariel Eliorad5afc82013-01-01 05:22:26 +00002239static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2240{
Ariel Elior8db573b2013-01-01 05:22:37 +00002241 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002242 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002243
Ariel Eliorad5afc82013-01-01 05:22:26 +00002244 /* number of queues for statistics is number of eth queues + FCoE */
2245 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002246
Ariel Eliorad5afc82013-01-01 05:22:26 +00002247 /* Total number of FW statistics requests =
2248 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2249 * and fcoe l2 queue) stats + num of queues (which includes another 1
2250 * for fcoe l2 queue if applicable)
2251 */
2252 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2253
Ariel Elior8db573b2013-01-01 05:22:37 +00002254 /* vf stats appear in the request list, but their data is allocated by
2255 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2256 * it is used to determine where to place the vf stats queries in the
2257 * request struct
2258 */
2259 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002260 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002261
Ariel Eliorad5afc82013-01-01 05:22:26 +00002262 /* Request is built from stats_query_header and an array of
2263 * stats_query_cmd_group each of which contains
 2264 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2265 * configured in the stats_query_header.
2266 */
2267 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002268 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2269 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002270 1 : 0));
2271
Ariel Elior8db573b2013-01-01 05:22:37 +00002272 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2273 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002274 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2275 num_groups * sizeof(struct stats_query_cmd_group);
2276
2277 /* Data for statistics requests + stats_counter
2278 * stats_counter holds per-STORM counters that are incremented
2279 * when STORM has finished with the current request.
2280 * memory for FCoE offloaded statistics are counted anyway,
2281 * even if they will not be sent.
2282 * VF stats are not accounted for here as the data of VF stats is stored
2283 * in memory allocated by the VF, not here.
2284 */
2285 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2286 sizeof(struct per_pf_stats) +
2287 sizeof(struct fcoe_statistics_params) +
2288 sizeof(struct per_queue_stats) * num_queue_stats +
2289 sizeof(struct stats_counter);
2290
Joe Perchescd2b0382014-02-20 13:25:51 -08002291 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2292 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2293 if (!bp->fw_stats)
2294 goto alloc_mem_err;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002295
2296 /* Set shortcuts */
2297 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2298 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2299 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2300 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2301 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2302 bp->fw_stats_req_sz;
2303
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002304 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002305 U64_HI(bp->fw_stats_req_mapping),
2306 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002307 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002308 U64_HI(bp->fw_stats_data_mapping),
2309 U64_LO(bp->fw_stats_data_mapping));
2310 return 0;
2311
2312alloc_mem_err:
2313 bnx2x_free_fw_stats_mem(bp);
2314 BNX2X_ERR("Can't allocate FW stats memory\n");
2315 return -ENOMEM;
2316}
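/* Sizing example (illustrative): with 4 ETH queues, FCoE enabled and no
 * SR-IOV headroom, fw_stats_num = 2 + 1 + (4 + 1) = 8 requests. Assuming
 * STATS_QUERY_CMD_COUNT = 16, those fit in num_groups = 1 command group,
 * so fw_stats_req_sz = sizeof(struct stats_query_header) +
 * 1 * sizeof(struct stats_query_cmd_group), and the statistics data area
 * follows it within the same DMA-coherent allocation.
 */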
2317
2318/* send load request to mcp and analyze response */
2319static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2320{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002321 u32 param;
2322
Ariel Eliorad5afc82013-01-01 05:22:26 +00002323 /* init fw_seq */
2324 bp->fw_seq =
2325 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2326 DRV_MSG_SEQ_NUMBER_MASK);
2327 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2328
2329 /* Get current FW pulse sequence */
2330 bp->fw_drv_pulse_wr_seq =
2331 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2332 DRV_PULSE_SEQ_MASK);
2333 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2334
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002335 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2336
2337 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2338 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2339
Ariel Eliorad5afc82013-01-01 05:22:26 +00002340 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002341 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002342
2343 /* if mcp fails to respond we must abort */
2344 if (!(*load_code)) {
2345 BNX2X_ERR("MCP response failure, aborting\n");
2346 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002347 }
2348
Ariel Eliorad5afc82013-01-01 05:22:26 +00002349 /* If mcp refused (e.g. other port is in diagnostic mode) we
2350 * must abort
2351 */
2352 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2353 BNX2X_ERR("MCP refused load request, aborting\n");
2354 return -EBUSY;
2355 }
2356 return 0;
2357}
2358
 2359/* check whether another PF has already loaded FW to the chip. In
 2360 * virtualized environments a PF from another VM may have already
 2361 * initialized the device, including loading FW
2362 */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002363int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002364{
2365 /* is another pf loaded on this engine? */
2366 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2367 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2368 /* build my FW version dword */
2369 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2370 (BCM_5710_FW_MINOR_VERSION << 8) +
2371 (BCM_5710_FW_REVISION_VERSION << 16) +
2372 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2373
2374 /* read loaded FW from chip */
2375 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2376
2377 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2378 loaded_fw, my_fw);
2379
2380 /* abort nic load if version mismatch */
2381 if (my_fw != loaded_fw) {
Yuval Mintz91ebb922013-12-26 09:57:07 +02002382 if (print_err)
2383 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2384 loaded_fw, my_fw);
2385 else
2386 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2387 loaded_fw, my_fw);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002388 return -EBUSY;
2389 }
2390 }
2391 return 0;
2392}
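/* Version-dword example (illustrative version numbers): FW 7.8.19.0 is
 * packed as 7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807, and the
 * load is aborted only when this value differs from the word read back
 * from XSEM_REG_PRAM on the already-initialized chip.
 */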
2393
2394/* returns the "mcp load_code" according to global load_count array */
2395static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2396{
2397 int path = BP_PATH(bp);
2398
2399 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002400 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2401 bnx2x_load_count[path][2]);
2402 bnx2x_load_count[path][0]++;
2403 bnx2x_load_count[path][1 + port]++;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002404 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002405 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2406 bnx2x_load_count[path][2]);
2407 if (bnx2x_load_count[path][0] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002408 return FW_MSG_CODE_DRV_LOAD_COMMON;
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002409 else if (bnx2x_load_count[path][1 + port] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002410 return FW_MSG_CODE_DRV_LOAD_PORT;
2411 else
2412 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2413}
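/* bnx2x_load_count[path] bookkeeping sketch: index 0 counts all functions
 * loaded on this path, indices 1 and 2 count the functions per port. The
 * first function on the path therefore gets LOAD_COMMON, the first on a
 * port gets LOAD_PORT, and everything else gets LOAD_FUNCTION - mirroring
 * what the MCP would normally answer.
 */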
2414
2415/* mark PMF if applicable */
2416static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2417{
2418 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2419 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2420 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2421 bp->port.pmf = 1;
2422 /* We need the barrier to ensure the ordering between the
2423 * writing to bp->port.pmf here and reading it from the
2424 * bnx2x_periodic_task().
2425 */
2426 smp_mb();
2427 } else {
2428 bp->port.pmf = 0;
2429 }
2430
2431 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2432}
2433
2434static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2435{
2436 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2437 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2438 (bp->common.shmem2_base)) {
2439 if (SHMEM2_HAS(bp, dcc_support))
2440 SHMEM2_WR(bp, dcc_support,
2441 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2442 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2443 if (SHMEM2_HAS(bp, afex_driver_support))
2444 SHMEM2_WR(bp, afex_driver_support,
2445 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2446 }
2447
2448 /* Set AFEX default VLAN tag to an invalid value */
2449 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002450}
2451
Eric Dumazet1191cb82012-04-27 21:39:21 +00002452/**
2453 * bnx2x_bz_fp - zero content of the fastpath structure.
2454 *
2455 * @bp: driver handle
2456 * @index: fastpath index to be zeroed
2457 *
 2458 * Makes sure the contents of bp->fp[index].napi are kept
2459 * intact.
2460 */
2461static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2462{
2463 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002464 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002465 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002466 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002467
Eric Dumazet1191cb82012-04-27 21:39:21 +00002468 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002469 if (fp->tpa_info)
2470 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2471 sizeof(struct bnx2x_agg_info));
2472 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002473
2474 /* Restore the NAPI object as it has been already initialized */
2475 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002476 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002477 fp->bp = bp;
2478 fp->index = index;
2479 if (IS_ETH_FP(fp))
2480 fp->max_cos = bp->max_cos;
2481 else
2482 /* Special queues support only one CoS */
2483 fp->max_cos = 1;
2484
Merav Sicron65565882012-06-19 07:48:26 +00002485 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002486 if (IS_FCOE_FP(fp))
2487 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002488 if (IS_ETH_FP(fp))
2489 for_each_cos_in_tx_queue(fp, cos)
2490 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2491 BNX2X_NUM_ETH_QUEUES(bp) + index];
2492
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002493 /* set the tpa flag for each queue. The tpa flag determines the queue
Eric Dumazet1191cb82012-04-27 21:39:21 +00002494 * minimal size so it must be set prior to queue memory allocation
2495 */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002496 if (bp->dev->features & NETIF_F_LRO)
Eric Dumazet1191cb82012-04-27 21:39:21 +00002497 fp->mode = TPA_MODE_LRO;
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002498 else if (bp->dev->features & NETIF_F_GRO &&
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002499 bnx2x_mtu_allows_gro(bp->dev->mtu))
Eric Dumazet1191cb82012-04-27 21:39:21 +00002500 fp->mode = TPA_MODE_GRO;
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002501 else
2502 fp->mode = TPA_MODE_DISABLED;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002503
Michal Schmidt22a8f232015-04-27 17:20:38 +02002504 /* We don't want TPA if it's disabled in bp
2505 * or if this is an FCoE L2 ring.
2506 */
2507 if (bp->disable_tpa || IS_FCOE_FP(fp))
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002508 fp->mode = TPA_MODE_DISABLED;
Merav Sicron55c11942012-11-07 00:45:48 +00002509}
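/*
 * A minimal sketch of the save/zero/restore idiom bnx2x_bz_fp() relies on
 * (illustrative only; the field names are the real ones used above):
 *
 *	struct napi_struct orig_napi = fp->napi;
 *	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
 *
 *	memset(fp, 0, sizeof(*fp));
 *
 *	fp->napi = orig_napi;		(registered once at probe time)
 *	fp->tpa_info = orig_tpa_info;	(allocated once; only its contents
 *					 are re-zeroed)
 *
 * This lets a re-load wipe all per-queue state without re-registering NAPI
 * or re-allocating the aggregation info array.
 */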
2510
Yuval Mintz230d00e2015-07-22 09:16:25 +03002511void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2512{
2513 u32 cur;
2514
2515 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2516 return;
2517
2518 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2519 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2520 cur, state);
2521
2522 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2523}
2524
Merav Sicron55c11942012-11-07 00:45:48 +00002525int bnx2x_load_cnic(struct bnx2x *bp)
2526{
2527 int i, rc, port = BP_PORT(bp);
2528
2529 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2530
2531 mutex_init(&bp->cnic_mutex);
2532
Ariel Eliorad5afc82013-01-01 05:22:26 +00002533 if (IS_PF(bp)) {
2534 rc = bnx2x_alloc_mem_cnic(bp);
2535 if (rc) {
2536 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2537 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2538 }
Merav Sicron55c11942012-11-07 00:45:48 +00002539 }
2540
2541 rc = bnx2x_alloc_fp_mem_cnic(bp);
2542 if (rc) {
2543 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2544 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2545 }
2546
2547 /* Update the number of queues with the cnic queues */
2548 rc = bnx2x_set_real_num_queues(bp, 1);
2549 if (rc) {
2550 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2551 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2552 }
2553
2554 /* Add all CNIC NAPI objects */
2555 bnx2x_add_all_napi_cnic(bp);
2556 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2557 bnx2x_napi_enable_cnic(bp);
2558
2559 rc = bnx2x_init_hw_func_cnic(bp);
2560 if (rc)
2561 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2562
2563 bnx2x_nic_init_cnic(bp);
2564
Ariel Eliorad5afc82013-01-01 05:22:26 +00002565 if (IS_PF(bp)) {
2566 /* Enable Timer scan */
2567 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002568
Ariel Eliorad5afc82013-01-01 05:22:26 +00002569 /* setup cnic queues */
2570 for_each_cnic_queue(bp, i) {
2571 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2572 if (rc) {
2573 BNX2X_ERR("Queue setup failed\n");
2574 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2575 }
Merav Sicron55c11942012-11-07 00:45:48 +00002576 }
2577 }
2578
2579 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002580 bnx2x_set_rx_mode_inner(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002581
2582 /* re-read iscsi info */
2583 bnx2x_get_iscsi_info(bp);
2584 bnx2x_setup_cnic_irq_info(bp);
2585 bnx2x_setup_cnic_info(bp);
2586 bp->cnic_loaded = true;
2587 if (bp->state == BNX2X_STATE_OPEN)
2588 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2589
Merav Sicron55c11942012-11-07 00:45:48 +00002590 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2591
2592 return 0;
2593
2594#ifndef BNX2X_STOP_ON_ERROR
2595load_error_cnic2:
2596 /* Disable Timer scan */
2597 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2598
2599load_error_cnic1:
2600 bnx2x_napi_disable_cnic(bp);
2601 /* Update the number of queues without the cnic queues */
Yuval Mintzd9d81862013-09-23 10:12:53 +03002602 if (bnx2x_set_real_num_queues(bp, 0))
Merav Sicron55c11942012-11-07 00:45:48 +00002603 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2604load_error_cnic0:
2605 BNX2X_ERR("CNIC-related load failed\n");
2606 bnx2x_free_fp_mem_cnic(bp);
2607 bnx2x_free_mem_cnic(bp);
2608 return rc;
2609#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002610}
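/*
 * The LOAD_ERROR_EXIT_CNIC() macro used above follows the usual kernel
 * "goto unwind" error handling. A minimal, hypothetical sketch of the same
 * pattern (step_a/step_b/undo_a are placeholders, not bnx2x symbols):
 *
 *	rc = step_a(bp);
 *	if (rc)
 *		goto err_a;
 *	rc = step_b(bp);
 *	if (rc)
 *		goto err_b;
 *	return 0;
 * err_b:
 *	undo_a(bp);
 * err_a:
 *	return rc;
 *
 * Each label undoes only what succeeded before the failure, in reverse order,
 * which is exactly how load_error_cnic0..2 are arranged.
 */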
2611
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002612/* must be called with rtnl_lock */
2613int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2614{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002615 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002616 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002617
Merav Sicron55c11942012-11-07 00:45:48 +00002618 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2619 DP(NETIF_MSG_IFUP,
2620 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2621
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002622#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002623 if (unlikely(bp->panic)) {
2624 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002625 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002626 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002627#endif
2628
2629 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2630
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002631 /* zero the structure w/o any lock, before SP handler is initialized */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002632 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2633 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2634 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002635
Ariel Eliorad5afc82013-01-01 05:22:26 +00002636 if (IS_PF(bp))
2637 /* must be called before memory allocation and HW init */
2638 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002639
Ariel Elior6383c0b2011-07-14 08:31:57 +00002640 /*
2641 * Zero fastpath structures preserving invariants like napi, which are
2642 * allocated only once, fp index, max_cos, bp pointer.
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002643 * Also set fp->mode and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002644 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002645 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002646 for_each_queue(bp, i)
2647 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002648 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2649 bp->num_cnic_queues) *
2650 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002651
Merav Sicron55c11942012-11-07 00:45:48 +00002652 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002653
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002654 /* Set the receive queues buffer size */
2655 bnx2x_set_rx_buf_size(bp);
2656
Ariel Eliorad5afc82013-01-01 05:22:26 +00002657 if (IS_PF(bp)) {
2658 rc = bnx2x_alloc_mem(bp);
2659 if (rc) {
2660 BNX2X_ERR("Unable to allocate bp memory\n");
2661 return rc;
2662 }
2663 }
2664
Ariel Eliorad5afc82013-01-01 05:22:26 +00002665	/* Needs to be done after alloc mem, since it self-adjusts to the amount
 2666	 * of memory available for RSS queues
2667 */
2668 rc = bnx2x_alloc_fp_mem(bp);
2669 if (rc) {
2670 BNX2X_ERR("Unable to allocate memory for fps\n");
2671 LOAD_ERROR_EXIT(bp, load_error0);
2672 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002673
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002674	/* Allocate memory for FW statistics */
2675 if (bnx2x_alloc_fw_stats_mem(bp))
2676 LOAD_ERROR_EXIT(bp, load_error0);
2677
Ariel Elior8d9ac292013-01-01 05:22:27 +00002678 /* request pf to initialize status blocks */
2679 if (IS_VF(bp)) {
2680 rc = bnx2x_vfpf_init(bp);
2681 if (rc)
2682 LOAD_ERROR_EXIT(bp, load_error0);
2683 }
2684
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002685	/* Since bnx2x_alloc_mem() may update
 2686	 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002687	 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002688 */
Merav Sicron55c11942012-11-07 00:45:48 +00002689 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002690 if (rc) {
2691 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002692 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002693 }
2694
Ariel Elior6383c0b2011-07-14 08:31:57 +00002695 /* configure multi cos mappings in kernel.
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002696 * this configuration may be overridden by a multi class queue
2697 * discipline or by a dcbx negotiation result.
Ariel Elior6383c0b2011-07-14 08:31:57 +00002698 */
2699 bnx2x_setup_tc(bp->dev, bp->max_cos);
2700
Merav Sicron26614ba2012-08-27 03:26:19 +00002701 /* Add all NAPI objects */
2702 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002703 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002704 bnx2x_napi_enable(bp);
2705
Ariel Eliorad5afc82013-01-01 05:22:26 +00002706 if (IS_PF(bp)) {
2707 /* set pf load just before approaching the MCP */
2708 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002709
Ariel Eliorad5afc82013-01-01 05:22:26 +00002710 /* if mcp exists send load request and analyze response */
2711 if (!BP_NOMCP(bp)) {
2712 /* attempt to load pf */
2713 rc = bnx2x_nic_load_request(bp, &load_code);
2714 if (rc)
2715 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002716
Ariel Eliorad5afc82013-01-01 05:22:26 +00002717 /* what did mcp say? */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002718 rc = bnx2x_compare_fw_ver(bp, load_code, true);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002719 if (rc) {
2720 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002721 LOAD_ERROR_EXIT(bp, load_error2);
2722 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002723 } else {
2724 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002725 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002726
Ariel Eliorad5afc82013-01-01 05:22:26 +00002727 /* mark pmf if applicable */
2728 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002729
Ariel Eliorad5afc82013-01-01 05:22:26 +00002730 /* Init Function state controlling object */
2731 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002732
Ariel Eliorad5afc82013-01-01 05:22:26 +00002733 /* Initialize HW */
2734 rc = bnx2x_init_hw(bp, load_code);
2735 if (rc) {
2736 BNX2X_ERR("HW init failed, aborting\n");
2737 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2738 LOAD_ERROR_EXIT(bp, load_error2);
2739 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002740 }
2741
Yuval Mintzecf01c22013-04-22 02:53:03 +00002742 bnx2x_pre_irq_nic_init(bp);
2743
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002744 /* Connect to IRQs */
2745 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002746 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002747 BNX2X_ERR("setup irqs failed\n");
2748 if (IS_PF(bp))
2749 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002750 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002751 }
2752
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002753 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002754 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002755 /* Setup NIC internals and enable interrupts */
2756 bnx2x_post_irq_nic_init(bp, load_code);
2757
Ariel Eliorad5afc82013-01-01 05:22:26 +00002758 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002759 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002760
Ariel Eliorad5afc82013-01-01 05:22:26 +00002761 /* Set AFEX default VLAN tag to an invalid value */
2762 bp->afex_def_vlan_tag = -1;
2763 bnx2x_nic_load_afex_dcc(bp, load_code);
2764 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2765 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002766 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002767 BNX2X_ERR("Function start failed!\n");
2768 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2769
Merav Sicron55c11942012-11-07 00:45:48 +00002770 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002771 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002772
Ariel Eliorad5afc82013-01-01 05:22:26 +00002773 /* Send LOAD_DONE command to MCP */
2774 if (!BP_NOMCP(bp)) {
2775 load_code = bnx2x_fw_command(bp,
2776 DRV_MSG_CODE_LOAD_DONE, 0);
2777 if (!load_code) {
2778 BNX2X_ERR("MCP response failure, aborting\n");
2779 rc = -EBUSY;
2780 LOAD_ERROR_EXIT(bp, load_error3);
2781 }
2782 }
2783
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002784 /* initialize FW coalescing state machines in RAM */
2785 bnx2x_update_coalesce(bp);
Ariel Elior60cad4e2013-09-04 14:09:22 +03002786 }
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002787
Ariel Elior60cad4e2013-09-04 14:09:22 +03002788 /* setup the leading queue */
2789 rc = bnx2x_setup_leading(bp);
2790 if (rc) {
2791 BNX2X_ERR("Setup leading failed!\n");
2792 LOAD_ERROR_EXIT(bp, load_error3);
2793 }
2794
2795 /* set up the rest of the queues */
2796 for_each_nondefault_eth_queue(bp, i) {
2797 if (IS_PF(bp))
2798 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2799 else /* VF */
2800 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002801 if (rc) {
Ariel Elior60cad4e2013-09-04 14:09:22 +03002802 BNX2X_ERR("Queue %d setup failed\n", i);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002803 LOAD_ERROR_EXIT(bp, load_error3);
2804 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002805 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002806
Ariel Elior60cad4e2013-09-04 14:09:22 +03002807 /* setup rss */
2808 rc = bnx2x_init_rss(bp);
2809 if (rc) {
2810 BNX2X_ERR("PF RSS init failed\n");
2811 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002812 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002813
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002814 /* Now when Clients are configured we are ready to work */
2815 bp->state = BNX2X_STATE_OPEN;
2816
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002817 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002818 if (IS_PF(bp))
2819 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002820 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002821 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2822 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002823 if (rc) {
2824 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002825 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002826 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002827
Ariel Eliorad5afc82013-01-01 05:22:26 +00002828 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002829 bnx2x_update_max_mf_config(bp, bp->pending_max);
2830 bp->pending_max = 0;
2831 }
2832
Sudarsana Reddy Kalluru21fe14f2018-06-28 04:52:15 -07002833 bp->force_link_down = false;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002834 if (bp->port.pmf) {
2835 rc = bnx2x_initial_phy_init(bp, load_mode);
2836 if (rc)
2837 LOAD_ERROR_EXIT(bp, load_error3);
2838 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002839 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002840
2841 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002842
Yuval Mintz05cc5a32015-07-29 15:52:46 +03002843 /* Re-configure vlan filters */
2844 rc = bnx2x_vlan_reconfigure_vid(bp);
2845 if (rc)
2846 LOAD_ERROR_EXIT(bp, load_error3);
2847
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002848 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002849 bnx2x_set_rx_mode_inner(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002850
Michal Kalderoneeed0182014-08-17 16:47:44 +03002851 if (bp->flags & PTP_SUPPORTED) {
2852 bnx2x_init_ptp(bp);
2853 bnx2x_configure_ptp_filters(bp);
2854 }
2855 /* Start Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002856 switch (load_mode) {
2857 case LOAD_NORMAL:
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002858 /* Tx queue should be only re-enabled */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002859 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002860 break;
2861
2862 case LOAD_OPEN:
2863 netif_tx_start_all_queues(bp->dev);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002864 smp_mb__after_atomic();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002865 break;
2866
2867 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002868 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002869 bp->state = BNX2X_STATE_DIAG;
2870 break;
2871
2872 default:
2873 break;
2874 }
2875
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002876 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002877 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002878 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002879 bnx2x__link_status_update(bp);
2880
2881 /* start the timer */
2882 mod_timer(&bp->timer, jiffies + bp->current_interval);
2883
Merav Sicron55c11942012-11-07 00:45:48 +00002884 if (CNIC_ENABLED(bp))
2885 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002886
Yuval Mintz42f82772014-03-23 18:12:23 +02002887 if (IS_PF(bp))
2888 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2889
Ariel Eliorad5afc82013-01-01 05:22:26 +00002890 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2891 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002892 u32 val;
2893 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
Yuval Mintz230d00e2015-07-22 09:16:25 +03002894 val &= ~DRV_FLAGS_MTU_MASK;
2895 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002896 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2897 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2898 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2899 }
2900
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002901 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002902 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002903 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002904 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002905 return -EBUSY;
2906 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002907
Yuval Mintzc48f3502015-07-22 09:16:26 +03002908 /* Update driver data for On-Chip MFW dump. */
2909 if (IS_PF(bp))
2910 bnx2x_update_mfw_dump(bp);
2911
Barak Witkowski98768792012-06-19 07:48:31 +00002912 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2913 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2914 bnx2x_dcbx_init(bp, false);
2915
Yuval Mintz230d00e2015-07-22 09:16:25 +03002916 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2917 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2918
Merav Sicron55c11942012-11-07 00:45:48 +00002919 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2920
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002921 return 0;
2922
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002923#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002924load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002925 if (IS_PF(bp)) {
2926 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002927
Ariel Eliorad5afc82013-01-01 05:22:26 +00002928 /* Clean queueable objects */
2929 bnx2x_squeeze_objects(bp);
2930 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002931
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002932 /* Free SKBs, SGEs, TPA pool and driver internals */
2933 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002934 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002935 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002936
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002937 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002938 bnx2x_free_irq(bp);
2939load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002940 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002941 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2942 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2943 }
2944
2945 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002946load_error1:
2947 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002948 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002949
Ariel Elior889b9af2012-01-26 06:01:51 +00002950 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002951 if (IS_PF(bp))
2952 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002953load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002954 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002955 bnx2x_free_fp_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002956 bnx2x_free_mem(bp);
2957
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002958 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002959#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002960}
2961
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002962int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002963{
2964 u8 rc = 0, cos, i;
2965
2966 /* Wait until tx fastpath tasks complete */
2967 for_each_tx_queue(bp, i) {
2968 struct bnx2x_fastpath *fp = &bp->fp[i];
2969
2970 for_each_cos_in_tx_queue(fp, cos)
2971 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2972 if (rc)
2973 return rc;
2974 }
2975 return 0;
2976}
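/*
 * Note on the loop above: "rc" is overwritten on every CoS iteration and only
 * tested after the inner loop, so only the last CoS of each queue decides the
 * return value. A stricter variant (a sketch, not the driver's code) would
 * check inside the inner loop:
 *
 *	for_each_cos_in_tx_queue(fp, cos) {
 *		rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
 *		if (rc)
 *			return rc;
 *	}
 */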
2977
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002978/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002979int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002980{
2981 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002982 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002983
Merav Sicron55c11942012-11-07 00:45:48 +00002984 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2985
Yuval Mintz230d00e2015-07-22 09:16:25 +03002986 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2987 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2988
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002989 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002990 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002991 u32 val;
2992 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2993 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2994 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2995 }
2996
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002997 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002998 (bp->state == BNX2X_STATE_CLOSED ||
2999 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003000 /* We can get here if the driver has been unloaded
3001 * during parity error recovery and is either waiting for a
3002 * leader to complete or for other functions to unload and
3003 * then ifdown has been issued. In this case we want to
3004 * unload and let other functions to complete a recovery
3005 * process.
3006 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003007 bp->recovery_state = BNX2X_RECOVERY_DONE;
3008 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003009 bnx2x_release_leader_lock(bp);
3010 smp_mb();
3011
Merav Sicron51c1a582012-03-18 10:33:38 +00003012 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3013 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003014 return -EINVAL;
3015 }
3016
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00003017	/* Nothing to do during unload if the previous bnx2x_nic_load()
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003018	 * has not completed successfully - all resources are released.
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00003019	 *
 3020	 * We can get here only after an unsuccessful ndo_* callback, during
 3021	 * which the dev->IFF_UP flag is still on.
3022 */
3023 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3024 return 0;
3025
3026 /* It's important to set the bp->state to the value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07003027 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3028 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3029 */
3030 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3031 smp_mb();
3032
Ariel Elior78c3bcc2013-06-20 17:39:08 +03003033 /* indicate to VFs that the PF is going down */
3034 bnx2x_iov_channel_down(bp);
3035
Merav Sicron55c11942012-11-07 00:45:48 +00003036 if (CNIC_LOADED(bp))
3037 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3038
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00003039 /* Stop Tx */
3040 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003041 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00003042
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003043 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003044
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003045 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003046
Guilherme G. Piccoliaf60c382017-12-22 13:01:39 -02003047 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00003048 /* Set ALWAYS_ALIVE bit in shmem */
3049 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3050 bnx2x_drv_pulse(bp);
3051 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3052 bnx2x_save_statistics(bp);
3053 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003054
Yuval Mintzd78a1f02016-03-13 21:21:48 +02003055 /* wait till consumers catch up with producers in all queues.
3056 * If we're recovering, FW can't write to host so no reason
3057 * to wait for the queues to complete all Tx.
3058 */
3059 if (unload_mode != UNLOAD_RECOVERY)
3060 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003061
Ariel Elior9b176b62013-01-01 05:22:28 +00003062	/* if VF, indicate to the PF that this function is going down (the PF
 3063	 * will delete SP elements and clear initializations)
3064 */
3065 if (IS_VF(bp))
3066 bnx2x_vfpf_close_vf(bp);
3067 else if (unload_mode != UNLOAD_RECOVERY)
3068 /* if this is a normal/close unload need to clean up chip*/
Yuval Mintz5d07d862012-09-13 02:56:21 +00003069 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003070 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003071 /* Send the UNLOAD_REQUEST to the MCP */
3072 bnx2x_send_unload_req(bp, unload_mode);
3073
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003074 /* Prevent transactions to host from the functions on the
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003075 * engine that doesn't reset global blocks in case of global
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003076 * attention once global blocks are reset and gates are opened
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003077 * (the engine which leader will perform the recovery
3078 * last).
3079 */
3080 if (!CHIP_IS_E1x(bp))
3081 bnx2x_pf_disable(bp);
3082
3083 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003084 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00003085 /* Delete all NAPI objects */
3086 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003087 if (CNIC_LOADED(bp))
3088 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003089 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003090 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003091
3092 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00003093 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003094 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003095
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003096 /*
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003097 * At this stage no more interrupts will arrive so we may safely clean
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003098 * the queueable objects here in case they failed to get cleaned so far.
3099 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003100 if (IS_PF(bp))
3101 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003102
Vladislav Zolotarov79616892011-07-21 07:58:54 +00003103 /* There should be no more pending SP commands at this stage */
3104 bp->sp_state = 0;
3105
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003106 bp->port.pmf = 0;
3107
Dmitry Kravkova0d307b2013-11-17 08:59:26 +02003108 /* clear pending work in rtnl task */
3109 bp->sp_rtnl_state = 0;
3110 smp_mb();
3111
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003112 /* Free SKBs, SGEs, TPA pool and driver internals */
3113 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003114 if (CNIC_LOADED(bp))
3115 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003116 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003117 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003118
Ariel Eliorad5afc82013-01-01 05:22:26 +00003119 bnx2x_free_fp_mem(bp);
3120 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00003121 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003122
Ariel Eliorad5afc82013-01-01 05:22:26 +00003123 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00003124 if (CNIC_LOADED(bp))
3125 bnx2x_free_mem_cnic(bp);
3126 }
Ariel Eliorb4cddbd2013-08-28 01:13:03 +03003127 bnx2x_free_mem(bp);
3128
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003129 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00003130 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003131
Yuval Mintz42f82772014-03-23 18:12:23 +02003132 /* Clear driver version indication in shmem */
Guilherme G. Piccoliaf60c382017-12-22 13:01:39 -02003133 if (IS_PF(bp) && !BP_NOMCP(bp))
Yuval Mintz42f82772014-03-23 18:12:23 +02003134 bnx2x_update_mng_version(bp);
3135
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003136 /* Check if there are pending parity attentions. If there are - set
3137 * RECOVERY_IN_PROGRESS.
3138 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003139 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003140 bnx2x_set_reset_in_progress(bp);
3141
3142 /* Set RESET_IS_GLOBAL if needed */
3143 if (global)
3144 bnx2x_set_reset_global(bp);
3145 }
3146
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003147 /* The last driver must disable a "close the gate" if there is no
3148 * parity attention or "process kill" pending.
3149 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003150 if (IS_PF(bp) &&
3151 !bnx2x_clear_pf_load(bp) &&
3152 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003153 bnx2x_disable_close_the_gate(bp);
3154
Merav Sicron55c11942012-11-07 00:45:48 +00003155 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3156
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003157 return 0;
3158}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003159
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003160int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3161{
3162 u16 pmcsr;
3163
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003164 /* If there is no power capability, silently succeed */
Jon Mason29ed74c2013-09-11 11:22:39 -07003165 if (!bp->pdev->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003166 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003167 return 0;
3168 }
3169
Jon Mason29ed74c2013-09-11 11:22:39 -07003170 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003171
3172 switch (state) {
3173 case PCI_D0:
Jon Mason29ed74c2013-09-11 11:22:39 -07003174 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003175 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3176 PCI_PM_CTRL_PME_STATUS));
3177
3178 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3179 /* delay required during transition out of D3hot */
3180 msleep(20);
3181 break;
3182
3183 case PCI_D3hot:
 3184		/* If there are other clients above, don't
 3185		 * shut down the power */
3186 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3187 return 0;
3188 /* Don't shut down the power for emulation and FPGA */
3189 if (CHIP_REV_IS_SLOW(bp))
3190 return 0;
3191
3192 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3193 pmcsr |= 3;
3194
3195 if (bp->wol)
3196 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3197
Jon Mason29ed74c2013-09-11 11:22:39 -07003198 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003199 pmcsr);
3200
3201 /* No more memory access after this point until
3202 * device is brought back to D0.
3203 */
3204 break;
3205
3206 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003207 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003208 return -EINVAL;
3209 }
3210 return 0;
3211}
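/*
 * Background for the PMCSR writes above: PCI_PM_CTRL_STATE_MASK covers the
 * two low-order PowerState bits of the PM control/status register, where
 * 0 means D0 and 3 means D3hot, so "pmcsr |= 3" requests D3hot. A minimal
 * sketch of the same request using only generic PCI constants (pdev is a
 * plain struct pci_dev *, no bnx2x specifics assumed):
 *
 *	u16 pmcsr;
 *
 *	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
 *	pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3;	(3 == D3hot)
 *	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
 */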
3212
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003213/*
3214 * net_device service functions
3215 */
stephen hemmingera8f47eb2014-01-09 22:20:11 -08003216static int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003217{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003218 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3219 napi);
3220 struct bnx2x *bp = fp->bp;
Eric Dumazet4d6acb62015-12-08 05:54:40 -08003221 int rx_work_done;
3222 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003223
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003224#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazet4d6acb62015-12-08 05:54:40 -08003225 if (unlikely(bp->panic)) {
3226 napi_complete(napi);
3227 return 0;
3228 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003229#endif
Eric Dumazet4d6acb62015-12-08 05:54:40 -08003230 for_each_cos_in_tx_queue(fp, cos)
3231 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3232 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003233
Eric Dumazet4d6acb62015-12-08 05:54:40 -08003234 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003235
Eric Dumazet4d6acb62015-12-08 05:54:40 -08003236 if (rx_work_done < budget) {
3237 /* No need to update SB for FCoE L2 ring as long as
3238 * it's connected to the default SB and the SB
3239 * has been updated when NAPI was scheduled.
3240 */
3241 if (IS_FCOE_FP(fp)) {
3242 napi_complete(napi);
3243 } else {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003244 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003245 /* bnx2x_has_rx_work() reads the status block,
3246 * thus we need to ensure that status block indices
3247 * have been actually read (bnx2x_update_fpsb_idx)
3248 * prior to this check (bnx2x_has_rx_work) so that
3249 * we won't write the "newer" value of the status block
3250 * to IGU (if there was a DMA right after
3251 * bnx2x_has_rx_work and if there is no rmb, the memory
3252 * reading (bnx2x_update_fpsb_idx) may be postponed
3253 * to right before bnx2x_ack_sb). In this case there
3254 * will never be another interrupt until there is
3255 * another update of the status block, while there
3256 * is still unhandled work.
3257 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003258 rmb();
3259
3260 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3261 napi_complete(napi);
3262 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003263 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003264 "Update index to %d\n", fp->fp_hc_idx);
3265 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3266 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003267 IGU_INT_ENABLE, 1);
Eric Dumazet4d6acb62015-12-08 05:54:40 -08003268 } else {
3269 rx_work_done = budget;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003270 }
3271 }
3272 }
3273
Eric Dumazet4d6acb62015-12-08 05:54:40 -08003274 return rx_work_done;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003275}
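/*
 * The function above follows the standard NAPI polling contract. A minimal,
 * generic sketch (the foo_* names are placeholders, not bnx2x symbols):
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *r = container_of(napi, struct foo_ring, napi);
 *		int done = foo_clean_rx(r, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irq(r);
 *		}
 *		return done;
 *	}
 *
 * Returning "budget" keeps the instance scheduled; interrupts are re-enabled
 * only after napi_complete(), which is what the IGU_INT_ENABLE ack does here.
 */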
3276
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003277/* we split the first BD into headers and data BDs
 3278 * to ease the pain of our fellow microcode engineers;
 3279 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003280 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003281static u16 bnx2x_tx_split(struct bnx2x *bp,
3282 struct bnx2x_fp_txdata *txdata,
3283 struct sw_tx_bd *tx_buf,
3284 struct eth_tx_start_bd **tx_bd, u16 hlen,
3285 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003286{
3287 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3288 struct eth_tx_bd *d_tx_bd;
3289 dma_addr_t mapping;
3290 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3291
3292 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003293 h_tx_bd->nbytes = cpu_to_le16(hlen);
3294
Dmitry Kravkov91226792013-03-11 05:17:52 +00003295 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3296 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003297
3298 /* now get a new data BD
3299 * (after the pbd) and fill it */
3300 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003301 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003302
3303 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3304 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3305
3306 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3307 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3308 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3309
3310 /* this marks the BD as one that has no individual mapping */
3311 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3312
3313 DP(NETIF_MSG_TX_QUEUED,
3314 "TSO split data size is %d (%x:%x)\n",
3315 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3316
3317 /* update tx_bd */
3318 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3319
3320 return bd_prod;
3321}
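/*
 * Worked example for bnx2x_tx_split() (the numbers are made up): for a TSO
 * packet whose linear part is 1000 bytes mapped at DMA address M, with
 * hlen = 54 (Ethernet + IPv4 + TCP headers), the first BD is shrunk to
 * describe [M, M + 54) and the new data BD describes [M + 54, M + 1000),
 * i.e. nbytes = 946 and addr = M + hlen. Both BDs share the single mapping;
 * BNX2X_TSO_SPLIT_BD records that the extra BD has no mapping of its own, so
 * the completion path will not try to unmap it separately.
 */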
3322
Yuval Mintz86564c32013-01-23 03:21:50 +00003323#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3324#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003325static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003326{
Yuval Mintz86564c32013-01-23 03:21:50 +00003327 __sum16 tsum = (__force __sum16) csum;
3328
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003329 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003330 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3331 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003332
3333 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003334 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3335 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003336
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003337 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003338}
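/*
 * Note on bnx2x_csum_fix(): "fix" is the signed byte distance between where
 * the stack started its partial checksum and where the hardware expects the
 * L4 header to start. For fix > 0 the checksum of those extra leading bytes
 * is subtracted out (csum_sub); for fix < 0 the missing bytes are added in
 * (csum_add); the result is folded to 16 bits and byte-swapped for the
 * little-endian parsing BD. csum_partial(), csum_fold(), csum_add() and
 * csum_sub() are the generic kernel checksum helpers, nothing bnx2x-specific.
 */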
3339
Dmitry Kravkov91226792013-03-11 05:17:52 +00003340static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003341{
3342 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003343 __u8 prot = 0;
3344 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003345
3346 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003347 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003348
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003349 protocol = vlan_get_protocol(skb);
3350 if (protocol == htons(ETH_P_IPV6)) {
3351 rc = XMIT_CSUM_V6;
3352 prot = ipv6_hdr(skb)->nexthdr;
3353 } else {
3354 rc = XMIT_CSUM_V4;
3355 prot = ip_hdr(skb)->protocol;
3356 }
3357
3358 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3359 if (inner_ip_hdr(skb)->version == 6) {
3360 rc |= XMIT_CSUM_ENC_V6;
3361 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003362 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003363 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003364 rc |= XMIT_CSUM_ENC_V4;
3365 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003366 rc |= XMIT_CSUM_TCP;
3367 }
3368 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003369 if (prot == IPPROTO_TCP)
3370 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003371
Eric Dumazet36a8f392013-09-29 01:21:32 -07003372 if (skb_is_gso(skb)) {
3373 if (skb_is_gso_v6(skb)) {
3374 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3375 if (rc & XMIT_CSUM_ENC)
3376 rc |= XMIT_GSO_ENC_V6;
3377 } else {
3378 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3379 if (rc & XMIT_CSUM_ENC)
3380 rc |= XMIT_GSO_ENC_V4;
3381 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003382 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003383
3384 return rc;
3385}
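/*
 * Example of how the xmit_type bits compose (derived directly from the logic
 * above, no new flags assumed): a CHECKSUM_PARTIAL IPv4/TCP frame that is
 * also GSO ends up with
 *
 *	xmit_type == XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4;
 *
 * while an encapsulated frame (skb->encapsulation set on a 57712/578xx)
 * additionally picks up the matching XMIT_CSUM_ENC_* / XMIT_GSO_ENC_* bits
 * for its inner headers.
 */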
3386
Yuval Mintzea2465a2015-12-18 10:42:12 +02003387/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3388#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3389
3390/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3391#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3392
3393#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003394/* Check if a packet requires linearization (packet is too fragmented).
 3395 * There is no need to check fragmentation if page size > 8K (there will
 3396 * be no violation of FW restrictions). */
3397static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3398 u32 xmit_type)
3399{
Yuval Mintzea2465a2015-12-18 10:42:12 +02003400 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3401 int to_copy = 0, hlen = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003402
Yuval Mintzea2465a2015-12-18 10:42:12 +02003403 if (xmit_type & XMIT_GSO_ENC)
3404 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003405
Yuval Mintzea2465a2015-12-18 10:42:12 +02003406 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003407 if (xmit_type & XMIT_GSO) {
3408 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
Yuval Mintzea2465a2015-12-18 10:42:12 +02003409 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003410 /* Number of windows to check */
3411 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3412 int wnd_idx = 0;
3413 int frag_idx = 0;
3414 u32 wnd_sum = 0;
3415
3416 /* Headers length */
Yuval Mintz592b9b82015-06-25 15:19:29 +03003417 if (xmit_type & XMIT_GSO_ENC)
3418 hlen = (int)(skb_inner_transport_header(skb) -
3419 skb->data) +
3420 inner_tcp_hdrlen(skb);
3421 else
3422 hlen = (int)(skb_transport_header(skb) -
3423 skb->data) + tcp_hdrlen(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003424
3425 /* Amount of data (w/o headers) on linear part of SKB*/
3426 first_bd_sz = skb_headlen(skb) - hlen;
3427
3428 wnd_sum = first_bd_sz;
3429
3430 /* Calculate the first sum - it's special */
3431 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3432 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003433 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003434
3435 /* If there was data on linear skb data - check it */
3436 if (first_bd_sz > 0) {
3437 if (unlikely(wnd_sum < lso_mss)) {
3438 to_copy = 1;
3439 goto exit_lbl;
3440 }
3441
3442 wnd_sum -= first_bd_sz;
3443 }
3444
3445 /* Others are easier: run through the frag list and
3446 check all windows */
3447 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3448 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003449 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003450
3451 if (unlikely(wnd_sum < lso_mss)) {
3452 to_copy = 1;
3453 break;
3454 }
3455 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003456 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003457 }
3458 } else {
 3459			/* in the non-LSO case, a too-fragmented packet
 3460			   should always be linearized */
3461 to_copy = 1;
3462 }
3463 }
3464
3465exit_lbl:
3466 if (unlikely(to_copy))
3467 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003468 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003469 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3470 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3471
3472 return to_copy;
3473}
3474#endif
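/*
 * Worked example for the window check above, assuming (for illustration only)
 * a MAX_FETCH_BD of 13 and the regular 3 reserved BDs, i.e. wnd_size = 10:
 * for a TSO skb with gso_size = 1400 the loop slides a 10-fragment window
 * across the frag list (the first window also counts the linear data beyond
 * the headers). If any window sums to fewer than 1400 bytes, a single packet
 * could need more BDs than the FW fetches at once, so to_copy is set and the
 * skb is linearized before transmission.
 */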
3475
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003476/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003477 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003478 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003479 * @skb: packet skb
3480 * @pbd: parse BD
3481 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003482 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003483static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3484 struct eth_tx_parse_bd_e1x *pbd,
3485 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003486{
3487 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003488 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003489 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003490
3491 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003492 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003493 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003494 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3495 ip_hdr(skb)->daddr,
3496 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003497 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003498 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003499 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3500 &ipv6_hdr(skb)->daddr,
3501 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003502 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003503
Yuval Mintz86564c32013-01-23 03:21:50 +00003504 pbd->global_data |=
3505 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003506}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003507
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003508/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003509 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3510 *
3511 * @bp: driver handle
3512 * @skb: packet skb
3513 * @parsing_data: data to be updated
3514 * @xmit_type: xmit flags
3515 *
3516 * 57712/578xx related, when skb has encapsulation
3517 */
3518static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3519 u32 *parsing_data, u32 xmit_type)
3520{
3521 *parsing_data |=
3522 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3523 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3524 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3525
3526 if (xmit_type & XMIT_CSUM_TCP) {
3527 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3528 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3529 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3530
3531 return skb_inner_transport_header(skb) +
3532 inner_tcp_hdrlen(skb) - skb->data;
3533 }
3534
3535 /* We support checksum offload for TCP and UDP only.
3536 * No need to pass the UDP header length - it's a constant.
3537 */
3538 return skb_inner_transport_header(skb) +
3539 sizeof(struct udphdr) - skb->data;
3540}
3541
3542/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003543 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003544 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003545 * @bp: driver handle
3546 * @skb: packet skb
3547 * @parsing_data: data to be updated
3548 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003549 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003550 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003551 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003552static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3553 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003554{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003555 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003556 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003557 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3558 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003559
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003560 if (xmit_type & XMIT_CSUM_TCP) {
3561 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3562 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3563 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003564
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003565 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003566 }
3567 /* We support checksum offload for TCP and UDP only.
3568 * No need to pass the UDP header length - it's a constant.
3569 */
3570 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003571}
3572
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003573/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003574static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3575 struct eth_tx_start_bd *tx_start_bd,
3576 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003577{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003578 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3579
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003580 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003581 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003582
3583 if (!(xmit_type & XMIT_CSUM_TCP))
3584 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003585}
3586
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003587/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003588 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003589 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003590 * @bp: driver handle
3591 * @skb: packet skb
3592 * @pbd: parse BD to be updated
3593 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003594 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003595static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3596 struct eth_tx_parse_bd_e1x *pbd,
3597 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003598{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003599 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003600
3601 /* for now NS flag is not used in Linux */
3602 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003603 cpu_to_le16(hlen |
3604 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3605 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003606
3607 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003608 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003609
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003610 hlen += pbd->ip_hlen_w;
3611
3612 /* We support checksum offload for TCP and UDP only */
3613 if (xmit_type & XMIT_CSUM_TCP)
3614 hlen += tcp_hdrlen(skb) / 2;
3615 else
3616 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003617
3618 pbd->total_hlen_w = cpu_to_le16(hlen);
3619 hlen = hlen*2;
3620
3621 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003622 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003623
3624 } else {
3625 s8 fix = SKB_CS_OFF(skb); /* signed! */
3626
3627 DP(NETIF_MSG_TX_QUEUED,
3628 "hlen %d fix %d csum before fix %x\n",
3629 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3630
3631 /* HW bug: fixup the CSUM */
3632 pbd->tcp_pseudo_csum =
3633 bnx2x_csum_fix(skb_transport_header(skb),
3634 SKB_CS(skb), fix);
3635
3636 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3637 pbd->tcp_pseudo_csum);
3638 }
3639
3640 return hlen;
3641}
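/*
 * Units note for bnx2x_set_pbd_csum(): the E1x parsing BD stores header
 * lengths in 16-bit words, hence all the ">> 1" and "/ 2" above. For example
 * (plain IPv4/TCP, no options, no VLAN): the 14-byte Ethernet header gives an
 * initial hlen of 7 words, ip_hlen_w = 10 words for the 20-byte IP header and
 * tcp_hdrlen / 2 = 10 words for the TCP header, so total_hlen_w = 27 and the
 * function returns 54 bytes after the final "hlen = hlen * 2".
 */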
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003642
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003643static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3644 struct eth_tx_parse_bd_e2 *pbd_e2,
3645 struct eth_tx_parse_2nd_bd *pbd2,
3646 u16 *global_data,
3647 u32 xmit_type)
3648{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003649 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003650 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003651
Dmitry Kravkove287a752013-03-21 15:38:24 +00003652 /* from outer IP to transport */
3653 hlen_w = (skb_inner_transport_header(skb) -
3654 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003655
3656 /* transport len */
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003657 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003658
Dmitry Kravkove287a752013-03-21 15:38:24 +00003659 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003660
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003661 /* outer IP header info */
3662 if (xmit_type & XMIT_CSUM_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003663 struct iphdr *iph = ip_hdr(skb);
Dmitry Kravkov1b4fc0e2013-07-11 15:48:21 +03003664 u32 csum = (__force u32)(~iph->check) -
3665 (__force u32)iph->tot_len -
3666 (__force u32)iph->frag_off;
Yuval Mintzc957d092013-06-25 08:50:11 +03003667
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003668 outerip_len = iph->ihl << 1;
3669
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003670 pbd2->fw_ip_csum_wo_len_flags_frag =
Yuval Mintzc957d092013-06-25 08:50:11 +03003671 bswab16(csum_fold((__force __wsum)csum));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003672 } else {
3673 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003674 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003675 pbd_e2->data.tunnel_data.flags |=
Yuval Mintz28311f82015-07-22 09:16:22 +03003676 ETH_TUNNEL_DATA_IPV6_OUTER;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003677 }
3678
3679 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3680
3681 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3682
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003683 /* inner IP header info */
3684 if (xmit_type & XMIT_CSUM_ENC_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003685 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003686
3687 pbd_e2->data.tunnel_data.pseudo_csum =
3688 bswab16(~csum_tcpudp_magic(
3689 inner_ip_hdr(skb)->saddr,
3690 inner_ip_hdr(skb)->daddr,
3691 0, IPPROTO_TCP, 0));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003692 } else {
3693 pbd_e2->data.tunnel_data.pseudo_csum =
3694 bswab16(~csum_ipv6_magic(
3695 &inner_ipv6_hdr(skb)->saddr,
3696 &inner_ipv6_hdr(skb)->daddr,
3697 0, IPPROTO_TCP, 0));
3698 }
3699
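	/* offset of the outer IP header from the start of the frame, in 16-bit words */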
3700 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3701
3702 *global_data |=
3703 outerip_off |
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003704 (outerip_len <<
3705 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3706 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3707 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003708
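	/* UDP-encapsulated tunnel (e.g. VXLAN) - tell the FW where the outer UDP header starts */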
3709 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3710 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3711 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3712 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003713}
3714
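/* Set the IPv6 "with extension header" indication for the FW when the
 * (inner) IPv6 next header is itself an IPv6 header.
 */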
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003715static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3716 u32 xmit_type)
3717{
3718 struct ipv6hdr *ipv6;
3719
3720 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3721 return;
3722
3723 if (xmit_type & XMIT_GSO_ENC_V6)
3724 ipv6 = inner_ipv6_hdr(skb);
3725 else /* XMIT_GSO_V6 */
3726 ipv6 = ipv6_hdr(skb);
3727
3728 if (ipv6->nexthdr == NEXTHDR_IPV6)
3729 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3730}
3731
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003732/* called with netif_tx_lock
3733 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3734 * netif_wake_queue()
3735 */
3736netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3737{
3738 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003739
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003740 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003741 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003742 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003743 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003744 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003745 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003746 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003747 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003748 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003749 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003750 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003751 dma_addr_t mapping;
3752 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3753 int i;
3754 u8 hlen = 0;
3755 __le16 pkt_size = 0;
3756 struct ethhdr *eth;
3757 u8 mac_type = UNICAST_ADDRESS;
3758
3759#ifdef BNX2X_STOP_ON_ERROR
3760 if (unlikely(bp->panic))
3761 return NETDEV_TX_BUSY;
3762#endif
3763
Ariel Elior6383c0b2011-07-14 08:31:57 +00003764 txq_index = skb_get_queue_mapping(skb);
3765 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003766
Merav Sicron55c11942012-11-07 00:45:48 +00003767 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003768
Merav Sicron65565882012-06-19 07:48:26 +00003769 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003770
3771 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003772 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003773 txq_index, fp_index, txdata_index); */
3774
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003775 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003776 DP(NETIF_MSG_TX_QUEUED,
3777 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003778 txdata->cid, fp_index, txdata_index, txdata, fp); */
3779
3780 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003781 skb_shinfo(skb)->nr_frags +
3782 BDS_PER_TX_PKT +
3783 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003784 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003785 if (txdata->tx_ring_size == 0) {
3786 struct bnx2x_eth_q_stats *q_stats =
3787 bnx2x_fp_qstats(bp, txdata->parent_fp);
3788 q_stats->driver_filtered_tx_pkt++;
3789 dev_kfree_skb(skb);
3790 return NETDEV_TX_OK;
3791 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003792 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3793 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003794 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003795
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003796 return NETDEV_TX_BUSY;
3797 }
3798
Merav Sicron51c1a582012-03-18 10:33:38 +00003799 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003800 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003801 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003802 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3803 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003804
3805 eth = (struct ethhdr *)skb->data;
3806
3807 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3808 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3809 if (is_broadcast_ether_addr(eth->h_dest))
3810 mac_type = BROADCAST_ADDRESS;
3811 else
3812 mac_type = MULTICAST_ADDRESS;
3813 }
3814
Dmitry Kravkov91226792013-03-11 05:17:52 +00003815#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003816 /* First, check if we need to linearize the skb (due to FW
3817 restrictions). No need to check fragmentation if page size > 8K
 3818	 (there will be no violation of FW restrictions) */
3819 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3820 /* Statistics of linearization */
3821 bp->lin_cnt++;
3822 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003823 DP(NETIF_MSG_TX_QUEUED,
3824 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003825 dev_kfree_skb_any(skb);
3826 return NETDEV_TX_OK;
3827 }
3828 }
3829#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003830 /* Map skb linear data for DMA */
3831 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3832 skb_headlen(skb), DMA_TO_DEVICE);
3833 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003834 DP(NETIF_MSG_TX_QUEUED,
3835 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003836 dev_kfree_skb_any(skb);
3837 return NETDEV_TX_OK;
3838 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003839 /*
3840 Please read carefully. First we use one BD which we mark as start,
3841 then we have a parsing info BD (used for TSO or xsum),
3842 and only then we have the rest of the TSO BDs.
3843 (don't forget to mark the last one as last,
3844 and to unmap only AFTER you write to the BD ...)
 3845	 And above all, all PBD sizes are in words - NOT DWORDS!
3846 */
3847
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003848 /* get current pkt produced now - advance it just before sending packet
3849 * since mapping of pages may fail and cause packet to be dropped
3850 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003851 pkt_prod = txdata->tx_pkt_prod;
3852 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003853
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003854 /* get a tx_buf and first BD
3855 * tx_start_bd may be changed during SPLIT,
3856 * but first_bd will always stay first
3857 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003858 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3859 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003860 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003861
3862 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003863
Michal Kalderoneeed0182014-08-17 16:47:44 +03003864 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3865 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
Guilherme G. Piccolifdd098e2019-06-27 13:31:33 -03003866 bp->eth_stats.ptp_skip_tx_ts++;
Michal Kalderoneeed0182014-08-17 16:47:44 +03003867 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3868 } else if (bp->ptp_tx_skb) {
Guilherme G. Piccolifdd098e2019-06-27 13:31:33 -03003869 bp->eth_stats.ptp_skip_tx_ts++;
3870 dev_err_once(&bp->dev->dev,
3871 "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
Michal Kalderoneeed0182014-08-17 16:47:44 +03003872 } else {
3873 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3874 /* schedule check for Tx timestamp */
3875 bp->ptp_tx_skb = skb_get(skb);
3876 bp->ptp_tx_start = jiffies;
3877 schedule_work(&bp->ptp_task);
3878 }
3879 }
3880
Dmitry Kravkov91226792013-03-11 05:17:52 +00003881 /* header nbd: indirectly zero other flags! */
3882 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003883
3884 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003885 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003886 tx_buf->skb = skb;
3887 tx_buf->flags = 0;
3888
3889 DP(NETIF_MSG_TX_QUEUED,
3890 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003891 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003892
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003893 if (skb_vlan_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003894 tx_start_bd->vlan_or_ethertype =
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003895 cpu_to_le16(skb_vlan_tag_get(skb));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003896 tx_start_bd->bd_flags.as_bitfield |=
3897 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003898 } else {
3899 /* when transmitting in a vf, start bd must hold the ethertype
3900 * for fw to enforce it
3901 */
Mintz, Yuval5c27f642017-06-09 17:17:01 +03003902 u16 vlan_tci = 0;
Yuval Mintzea36475a2014-08-25 17:48:30 +03003903#ifndef BNX2X_STOP_ON_ERROR
Mintz, Yuval5c27f642017-06-09 17:17:01 +03003904 if (IS_VF(bp)) {
Yuval Mintzea36475a2014-08-25 17:48:30 +03003905#endif
Mintz, Yuval5c27f642017-06-09 17:17:01 +03003906 /* Still need to consider inband vlan for enforced */
3907 if (__vlan_get_tag(skb, &vlan_tci)) {
3908 tx_start_bd->vlan_or_ethertype =
3909 cpu_to_le16(ntohs(eth->h_proto));
3910 } else {
3911 tx_start_bd->bd_flags.as_bitfield |=
3912 (X_ETH_INBAND_VLAN <<
3913 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3914 tx_start_bd->vlan_or_ethertype =
3915 cpu_to_le16(vlan_tci);
3916 }
Yuval Mintzea36475a2014-08-25 17:48:30 +03003917#ifndef BNX2X_STOP_ON_ERROR
Mintz, Yuval5c27f642017-06-09 17:17:01 +03003918 } else {
Ariel Eliordc1ba592013-01-01 05:22:30 +00003919 /* used by FW for packet accounting */
3920 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Mintz, Yuval5c27f642017-06-09 17:17:01 +03003921 }
Yuval Mintzea36475a2014-08-25 17:48:30 +03003922#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003923 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003924
Dmitry Kravkov91226792013-03-11 05:17:52 +00003925 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3926
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003927 /* turn on parsing and get a BD */
3928 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003929
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003930 if (xmit_type & XMIT_CSUM)
3931 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003932
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003933 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003934 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003935 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003936
3937 if (xmit_type & XMIT_CSUM_ENC) {
3938 u16 global_data = 0;
3939
3940 /* Set PBD in enc checksum offload case */
3941 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3942 &pbd_e2_parsing_data,
3943 xmit_type);
3944
3945 /* turn on 2nd parsing and get a BD */
3946 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3947
3948 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3949
3950 memset(pbd2, 0, sizeof(*pbd2));
3951
3952 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3953 (skb_inner_network_header(skb) -
3954 skb->data) >> 1;
3955
3956 if (xmit_type & XMIT_GSO_ENC)
3957 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3958 &global_data,
3959 xmit_type);
3960
3961 pbd2->global_data = cpu_to_le16(global_data);
3962
 3963	 /* add an additional parsing BD indication to the start BD */
3964 SET_FLAG(tx_start_bd->general_data,
3965 ETH_TX_START_BD_PARSE_NBDS, 1);
3966 /* set encapsulation flag in start BD */
3967 SET_FLAG(tx_start_bd->general_data,
3968 ETH_TX_START_BD_TUNNEL_EXIST, 1);
Dmitry Kravkovfe26566d2014-07-24 18:54:47 +03003969
3970 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3971
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003972 nbd++;
3973 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00003974 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003975 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3976 &pbd_e2_parsing_data,
3977 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003978 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00003979
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003980 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
Yuval Mintzbabe7232014-02-27 15:42:26 +02003981 /* Add the macs to the parsing BD if this is a vf or if
3982 * Tx Switching is enabled.
3983 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003984 if (IS_VF(bp)) {
3985 /* override GRE parameters in BD */
3986 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3987 &pbd_e2->data.mac_addr.src_mid,
3988 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003989 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003990
3991 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3992 &pbd_e2->data.mac_addr.dst_mid,
3993 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003994 eth->h_dest);
Yuval Mintzea36475a2014-08-25 17:48:30 +03003995 } else {
3996 if (bp->flags & TX_SWITCHING)
3997 bnx2x_set_fw_mac_addr(
3998 &pbd_e2->data.mac_addr.dst_hi,
3999 &pbd_e2->data.mac_addr.dst_mid,
4000 &pbd_e2->data.mac_addr.dst_lo,
4001 eth->h_dest);
4002#ifdef BNX2X_STOP_ON_ERROR
4003 /* Enforce security is always set in Stop on Error -
4004 * source mac should be present in the parsing BD
4005 */
4006 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4007 &pbd_e2->data.mac_addr.src_mid,
4008 &pbd_e2->data.mac_addr.src_lo,
4009 eth->h_source);
4010#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004011 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004012
4013 SET_FLAG(pbd_e2_parsing_data,
4014 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004015 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004016 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004017 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004018 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4019 /* Set PBD in checksum offload case */
4020 if (xmit_type & XMIT_CSUM)
4021 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004022
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004023 SET_FLAG(global_data,
4024 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4025 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004026 }
4027
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004028 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004029 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4030 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004031 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4032 pkt_size = tx_start_bd->nbytes;
4033
Merav Sicron51c1a582012-03-18 10:33:38 +00004034 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004035 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004036 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004037 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004038 tx_start_bd->bd_flags.as_bitfield,
4039 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004040
4041 if (xmit_type & XMIT_GSO) {
4042
4043 DP(NETIF_MSG_TX_QUEUED,
4044 "TSO packet len %d hlen %d total len %d tso size %d\n",
4045 skb->len, hlen, skb_headlen(skb),
4046 skb_shinfo(skb)->gso_size);
4047
4048 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4049
Dmitry Kravkov91226792013-03-11 05:17:52 +00004050 if (unlikely(skb_headlen(skb) > hlen)) {
4051 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004052 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4053 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004054 bd_prod);
4055 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004056 if (!CHIP_IS_E1x(bp))
Dmitry Kravkove42780b2014-08-17 16:47:43 +03004057 pbd_e2_parsing_data |=
4058 (skb_shinfo(skb)->gso_size <<
4059 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4060 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004061 else
Dmitry Kravkove42780b2014-08-17 16:47:43 +03004062 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004063 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00004064
4065 /* Set the PBD's parsing_data field if not zero
4066 * (for the chips newer than 57711).
4067 */
4068 if (pbd_e2_parsing_data)
4069 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4070
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004071 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4072
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004073 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004074 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4075 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4076
Eric Dumazet9e903e02011-10-18 21:00:24 +00004077 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4078 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004079 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00004080 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004081
Merav Sicron51c1a582012-03-18 10:33:38 +00004082 DP(NETIF_MSG_TX_QUEUED,
4083 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004084
4085 /* we need unmap all buffers already mapped
4086 * for this SKB;
4087 * first_bd->nbd need to be properly updated
4088 * before call to bnx2x_free_tx_pkt
4089 */
4090 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004091 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00004092 TX_BD(txdata->tx_pkt_prod),
4093 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004094 return NETDEV_TX_OK;
4095 }
4096
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004097 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00004098 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004099 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00004100 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004101
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004102 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4103 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00004104 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4105 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004106 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004107
4108 DP(NETIF_MSG_TX_QUEUED,
4109 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4110 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4111 le16_to_cpu(tx_data_bd->nbytes));
4112 }
4113
4114 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4115
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004116 /* update with actual num BDs */
4117 first_bd->nbd = cpu_to_le16(nbd);
4118
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004119 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4120
4121 /* now send a tx doorbell, counting the next BD
4122 * if the packet contains or ends with it
4123 */
4124 if (TX_BD_POFF(bd_prod) < nbd)
4125 nbd++;
4126
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004127 /* total_pkt_bytes should be set on the first data BD if
4128 * it's not an LSO packet and there is more than one
4129 * data BD. In this case pkt_size is limited by an MTU value.
4130 * However we prefer to set it for an LSO packet (while we don't
 4131	 * have to) in order to save some CPU cycles in a non-LSO
 4132	 * case, when we care much more about them.
4133 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004134 if (total_pkt_bd != NULL)
4135 total_pkt_bd->total_pkt_bytes = pkt_size;
4136
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004137 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004138 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00004139 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004140 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4141 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4142 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4143 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004144 if (pbd_e2)
4145 DP(NETIF_MSG_TX_QUEUED,
4146 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
Dmitry Kravkov91226792013-03-11 05:17:52 +00004147 pbd_e2,
4148 pbd_e2->data.mac_addr.dst_hi,
4149 pbd_e2->data.mac_addr.dst_mid,
4150 pbd_e2->data.mac_addr.dst_lo,
4151 pbd_e2->data.mac_addr.src_hi,
4152 pbd_e2->data.mac_addr.src_mid,
4153 pbd_e2->data.mac_addr.src_lo,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004154 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004155 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4156
Tom Herbert2df1a702011-11-28 16:33:37 +00004157 netdev_tx_sent_queue(txq, skb->len);
4158
Willem de Bruijn8373c572012-04-27 09:04:06 +00004159 skb_tx_timestamp(skb);
4160
Ariel Elior6383c0b2011-07-14 08:31:57 +00004161 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004162 /*
4163 * Make sure that the BD data is updated before updating the producer
4164 * since FW might read the BD right after the producer is updated.
4165 * This is only applicable for weak-ordered memory model archs such
4166 * as IA-64. The following barrier is also mandatory since FW will
 4167	 * assume packets must have BDs.
4168 */
4169 wmb();
4170
Ariel Elior6383c0b2011-07-14 08:31:57 +00004171 txdata->tx_db.data.prod += nbd;
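	/* compiler barrier - the doorbell copy must be fully written before DOORBELL() below */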
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004172 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004173
Ariel Elior6383c0b2011-07-14 08:31:57 +00004174 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004175
4176 mmiowb();
4177
Ariel Elior6383c0b2011-07-14 08:31:57 +00004178 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004179
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004180 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004181 netif_tx_stop_queue(txq);
4182
4183 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4184 * ordering of set_bit() in netif_tx_stop_queue() and read of
4185 * fp->bd_tx_cons */
4186 smp_mb();
4187
Barak Witkowski15192a82012-06-19 07:48:28 +00004188 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004189 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004190 netif_tx_wake_queue(txq);
4191 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00004192 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004193
4194 return NETDEV_TX_OK;
4195}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004196
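/* Fill c2s_map[] (one entry per priority) and *c2s_default from the
 * shmem PCP map entries of this function.
 */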
Yuval Mintz230d00e2015-07-22 09:16:25 +03004197void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4198{
4199 int mfw_vn = BP_FW_MB_IDX(bp);
4200 u32 tmp;
4201
 4202	 /* If the shmem shouldn't affect the configuration, use the identity mapping */
4203 if (!IS_MF_BD(bp)) {
4204 int i;
4205
4206 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4207 c2s_map[i] = i;
4208 *c2s_default = 0;
4209
4210 return;
4211 }
4212
4213 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4214 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4215 c2s_map[0] = tmp & 0xff;
4216 c2s_map[1] = (tmp >> 8) & 0xff;
4217 c2s_map[2] = (tmp >> 16) & 0xff;
4218 c2s_map[3] = (tmp >> 24) & 0xff;
4219
4220 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4221 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4222 c2s_map[4] = tmp & 0xff;
4223 c2s_map[5] = (tmp >> 8) & 0xff;
4224 c2s_map[6] = (tmp >> 16) & 0xff;
4225 c2s_map[7] = (tmp >> 24) & 0xff;
4226
4227 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4228 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4229 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4230}
4231
Ariel Elior6383c0b2011-07-14 08:31:57 +00004232/**
4233 * bnx2x_setup_tc - routine to configure net_device for multi tc
4234 *
 4235	 * @dev: net device to configure
 4236	 * @num_tc: number of traffic classes to enable
4237 *
4238 * callback connected to the ndo_setup_tc function pointer
4239 */
4240int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4241{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004242 struct bnx2x *bp = netdev_priv(dev);
Yuval Mintz230d00e2015-07-22 09:16:25 +03004243 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4244 int cos, prio, count, offset;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004245
4246 /* setup tc must be called under rtnl lock */
4247 ASSERT_RTNL();
4248
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004249	 /* no traffic classes requested - reset the tc configuration */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004250 if (!num_tc) {
4251 netdev_reset_tc(dev);
4252 return 0;
4253 }
4254
4255 /* requested to support too many traffic classes */
4256 if (num_tc > bp->max_cos) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00004257 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004258 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004259 return -EINVAL;
4260 }
4261
4262 /* declare amount of supported traffic classes */
4263 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004264 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004265 return -EINVAL;
4266 }
4267
Yuval Mintz230d00e2015-07-22 09:16:25 +03004268 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4269
Ariel Elior6383c0b2011-07-14 08:31:57 +00004270 /* configure priority to traffic class mapping */
4271 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
Yuval Mintz230d00e2015-07-22 09:16:25 +03004272 int outer_prio = c2s_map[prio];
4273
4274 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00004275 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4276 "mapping priority %d to tc %d\n",
Yuval Mintz230d00e2015-07-22 09:16:25 +03004277 outer_prio, bp->prio_to_cos[outer_prio]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004278 }
4279
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004280 /* Use this configuration to differentiate tc0 from other COSes
Ariel Elior6383c0b2011-07-14 08:31:57 +00004281 This can be used for ets or pfc, and save the effort of setting
 4282	 up a multi-class queue disc or negotiating DCBX with a switch
4283 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00004284 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004285 for (prio = 1; prio < 16; prio++) {
4286 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00004287 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004288 } */
4289
4290 /* configure traffic class to transmission queue mapping */
4291 for (cos = 0; cos < bp->max_cos; cos++) {
4292 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00004293 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004294 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00004295 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4296 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004297 cos, offset, count);
4298 }
4299
4300 return 0;
4301}
4302
John Fastabend16e5cc62016-02-16 21:16:43 -08004303int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
4304 struct tc_to_netdev *tc)
John Fastabende4c67342016-02-16 21:16:15 -08004305{
John Fastabend5eb4dce2016-02-29 11:26:13 -08004306 if (tc->type != TC_SETUP_MQPRIO)
John Fastabende4c67342016-02-16 21:16:15 -08004307 return -EINVAL;
John Fastabend16e5cc62016-02-16 21:16:43 -08004308 return bnx2x_setup_tc(dev, tc->tc);
John Fastabende4c67342016-02-16 21:16:15 -08004309}
4310
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004311/* called with rtnl_lock */
4312int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4313{
4314 struct sockaddr *addr = p;
4315 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004316 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004317
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004318 if (!is_valid_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004319 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004320 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004321 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004322
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004323 if (IS_MF_STORAGE_ONLY(bp)) {
4324 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004325 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004326 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004327
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004328 if (netif_running(dev)) {
4329 rc = bnx2x_set_eth_mac(bp, false);
4330 if (rc)
4331 return rc;
4332 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004333
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004334 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4335
4336 if (netif_running(dev))
4337 rc = bnx2x_set_eth_mac(bp, true);
4338
Yuval Mintz230d00e2015-07-22 09:16:25 +03004339 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4340 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4341
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004342 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004343}
4344
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004345static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4346{
4347 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4348 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004349 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004350
4351 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00004352
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004353 if (IS_FCOE_IDX(fp_index)) {
4354 memset(sb, 0, sizeof(union host_hc_status_block));
4355 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004356 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004357 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004358 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004359 BNX2X_PCI_FREE(sb->e2_sb,
4360 bnx2x_fp(bp, fp_index,
4361 status_blk_mapping),
4362 sizeof(struct host_hc_status_block_e2));
4363 else
4364 BNX2X_PCI_FREE(sb->e1x_sb,
4365 bnx2x_fp(bp, fp_index,
4366 status_blk_mapping),
4367 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004368 }
Merav Sicron55c11942012-11-07 00:45:48 +00004369
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004370 /* Rx */
4371 if (!skip_rx_queue(bp, fp_index)) {
4372 bnx2x_free_rx_bds(fp);
4373
4374 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4375 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4376 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4377 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4378 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4379
4380 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4381 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4382 sizeof(struct eth_fast_path_rx_cqe) *
4383 NUM_RCQ_BD);
4384
4385 /* SGE ring */
4386 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4387 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4388 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4389 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4390 }
4391
4392 /* Tx */
4393 if (!skip_tx_queue(bp, fp_index)) {
4394 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004395 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004396 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004397
Merav Sicron51c1a582012-03-18 10:33:38 +00004398 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00004399 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004400 fp_index, cos, txdata->cid);
4401
4402 BNX2X_FREE(txdata->tx_buf_ring);
4403 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4404 txdata->tx_desc_mapping,
4405 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4406 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004407 }
4408 /* end of fastpath */
4409}
4410
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004411static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004412{
4413 int i;
4414 for_each_cnic_queue(bp, i)
4415 bnx2x_free_fp_mem_at(bp, i);
4416}
4417
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004418void bnx2x_free_fp_mem(struct bnx2x *bp)
4419{
4420 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00004421 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004422 bnx2x_free_fp_mem_at(bp, i);
4423}
4424
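/* Set shortcuts to the status block index values and running index
 * arrays of this fastpath, per chip generation.
 */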
Eric Dumazet1191cb82012-04-27 21:39:21 +00004425static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004426{
4427 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004428 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004429 bnx2x_fp(bp, index, sb_index_values) =
4430 (__le16 *)status_blk.e2_sb->sb.index_values;
4431 bnx2x_fp(bp, index, sb_running_index) =
4432 (__le16 *)status_blk.e2_sb->sb.running_index;
4433 } else {
4434 bnx2x_fp(bp, index, sb_index_values) =
4435 (__le16 *)status_blk.e1x_sb->sb.index_values;
4436 bnx2x_fp(bp, index, sb_running_index) =
4437 (__le16 *)status_blk.e1x_sb->sb.running_index;
4438 }
4439}
4440
Eric Dumazet1191cb82012-04-27 21:39:21 +00004441/* Returns the number of actually allocated BDs */
4442static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4443 int rx_ring_size)
4444{
4445 struct bnx2x *bp = fp->bp;
4446 u16 ring_prod, cqe_ring_prod;
4447 int i, failure_cnt = 0;
4448
4449 fp->rx_comp_cons = 0;
4450 cqe_ring_prod = ring_prod = 0;
4451
 4452	 /* This routine is called only during fp init so
4453 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4454 */
4455 for (i = 0; i < rx_ring_size; i++) {
Michal Schmidt996dedb2013-09-05 22:13:09 +02004456 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
Eric Dumazet1191cb82012-04-27 21:39:21 +00004457 failure_cnt++;
4458 continue;
4459 }
4460 ring_prod = NEXT_RX_IDX(ring_prod);
4461 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4462 WARN_ON(ring_prod <= (i - failure_cnt));
4463 }
4464
4465 if (failure_cnt)
4466 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4467 i - failure_cnt, fp->index);
4468
4469 fp->rx_bd_prod = ring_prod;
4470 /* Limit the CQE producer by the CQE ring size */
4471 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4472 cqe_ring_prod);
Eric Dumazet1191cb82012-04-27 21:39:21 +00004473
Barak Witkowski15192a82012-06-19 07:48:28 +00004474 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004475
4476 return i - failure_cnt;
4477}
4478
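/* Chain the RCQ pages: the last element of each page points at the next page */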
4479static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4480{
4481 int i;
4482
4483 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4484 struct eth_rx_cqe_next_page *nextpg;
4485
4486 nextpg = (struct eth_rx_cqe_next_page *)
4487 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4488 nextpg->addr_hi =
4489 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4490 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4491 nextpg->addr_lo =
4492 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4493 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4494 }
4495}
4496
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004497static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4498{
4499 union host_hc_status_block *sb;
4500 struct bnx2x_fastpath *fp = &bp->fp[index];
4501 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004502 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004503 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004504
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004505 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004506 rx_ring_size = MIN_RX_SIZE_NONTPA;
4507 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004508 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004509 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4510
Yuval Mintz065f8b92012-10-03 04:22:59 +00004511 if (CHIP_IS_E3(bp)) {
4512 u32 cfg = SHMEM_RD(bp,
4513 dev_info.port_hw_config[BP_PORT(bp)].
4514 default_cfg);
4515
4516 /* Decrease ring size for 1G functions */
4517 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4518 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4519 rx_ring_size /= 10;
4520 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004521
David S. Miller8decf862011-09-22 03:23:13 -04004522 /* allocate at least number of buffers required by FW */
4523 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4524 MIN_RX_SIZE_TPA, rx_ring_size);
4525
4526 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004527 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004528 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004529
Yuval Mintz04c46732013-01-23 03:21:46 +00004530 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4531
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004532 /* Common */
4533 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004534
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004535 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004536 /* status blocks */
Joe Perchescd2b0382014-02-20 13:25:51 -08004537 if (!CHIP_IS_E1x(bp)) {
4538 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4539 sizeof(struct host_hc_status_block_e2));
4540 if (!sb->e2_sb)
4541 goto alloc_mem_err;
4542 } else {
4543 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4544 sizeof(struct host_hc_status_block_e1x));
4545 if (!sb->e1x_sb)
4546 goto alloc_mem_err;
4547 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004548 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004549
4550 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4551 * set shortcuts for it.
4552 */
4553 if (!IS_FCOE_IDX(index))
4554 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004555
4556 /* Tx */
4557 if (!skip_tx_queue(bp, index)) {
4558 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004559 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004560 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004561
Merav Sicron51c1a582012-03-18 10:33:38 +00004562 DP(NETIF_MSG_IFUP,
4563 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004564 index, cos);
4565
Joe Perchescd2b0382014-02-20 13:25:51 -08004566 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4567 sizeof(struct sw_tx_bd),
4568 GFP_KERNEL);
4569 if (!txdata->tx_buf_ring)
4570 goto alloc_mem_err;
4571 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4572 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4573 if (!txdata->tx_desc_ring)
4574 goto alloc_mem_err;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004575 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004576 }
4577
4578 /* Rx */
4579 if (!skip_rx_queue(bp, index)) {
4580 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Joe Perchescd2b0382014-02-20 13:25:51 -08004581 bnx2x_fp(bp, index, rx_buf_ring) =
4582 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4583 if (!bnx2x_fp(bp, index, rx_buf_ring))
4584 goto alloc_mem_err;
4585 bnx2x_fp(bp, index, rx_desc_ring) =
4586 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4587 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4588 if (!bnx2x_fp(bp, index, rx_desc_ring))
4589 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004590
Dmitry Kravkov75b29452013-06-19 01:36:05 +03004591 /* Seed all CQEs by 1s */
Joe Perchescd2b0382014-02-20 13:25:51 -08004592 bnx2x_fp(bp, index, rx_comp_ring) =
4593 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4594 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4595 if (!bnx2x_fp(bp, index, rx_comp_ring))
4596 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004597
4598 /* SGE ring */
Joe Perchescd2b0382014-02-20 13:25:51 -08004599 bnx2x_fp(bp, index, rx_page_ring) =
4600 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4601 GFP_KERNEL);
4602 if (!bnx2x_fp(bp, index, rx_page_ring))
4603 goto alloc_mem_err;
4604 bnx2x_fp(bp, index, rx_sge_ring) =
4605 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4606 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4607 if (!bnx2x_fp(bp, index, rx_sge_ring))
4608 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004609 /* RX BD ring */
4610 bnx2x_set_next_page_rx_bd(fp);
4611
4612 /* CQ ring */
4613 bnx2x_set_next_page_rx_cq(fp);
4614
4615 /* BDs */
4616 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4617 if (ring_size < rx_ring_size)
4618 goto alloc_mem_err;
4619 }
4620
4621 return 0;
4622
4623/* handles low memory cases */
4624alloc_mem_err:
4625 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4626 index, ring_size);
4627 /* FW will drop all packets if queue is not big enough,
4628 * In these cases we disable the queue
Ariel Elior6383c0b2011-07-14 08:31:57 +00004629 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004630 */
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02004631 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004632 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004633 /* release memory allocated for this queue */
4634 bnx2x_free_fp_mem_at(bp, index);
4635 return -ENOMEM;
4636 }
4637 return 0;
4638}
4639
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004640static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004641{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004642 if (!NO_FCOE(bp))
4643 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004644 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004645 /* we will fail load process instead of mark
4646 * NO_FCOE_FLAG
4647 */
4648 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004649
4650 return 0;
4651}
4652
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004653static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004654{
4655 int i;
4656
4657 /* 1. Allocate FP for leading - fatal if error
4658 * 2. Allocate RSS - fix number of queues if error
4659 */
4660
4661 /* leading */
4662 if (bnx2x_alloc_fp_mem_at(bp, 0))
4663 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004664
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004665 /* RSS */
4666 for_each_nondefault_eth_queue(bp, i)
4667 if (bnx2x_alloc_fp_mem_at(bp, i))
4668 break;
4669
4670 /* handle memory failures */
4671 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4672 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4673
4674 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004675 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004676 if (CNIC_SUPPORT(bp))
4677 /* move non eth FPs next to last eth FP
4678 * must be done in that order
4679 * FCOE_IDX < FWD_IDX < OOO_IDX
4680 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004681
Merav Sicron55c11942012-11-07 00:45:48 +00004682 /* move FCoE fp even NO_FCOE_FLAG is on */
4683 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4684 bp->num_ethernet_queues -= delta;
4685 bp->num_queues = bp->num_ethernet_queues +
4686 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004687 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4688 bp->num_queues + delta, bp->num_queues);
4689 }
4690
4691 return 0;
4692}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004693
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004694void bnx2x_free_mem_bp(struct bnx2x *bp)
4695{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004696 int i;
4697
4698 for (i = 0; i < bp->fp_array_size; i++)
4699 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004700 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004701 kfree(bp->sp_objs);
4702 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004703 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004704 kfree(bp->msix_table);
4705 kfree(bp->ilt);
4706}
4707
Bill Pemberton0329aba2012-12-03 09:24:24 -05004708int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004709{
4710 struct bnx2x_fastpath *fp;
4711 struct msix_entry *tbl;
4712 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004713 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004714 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004715 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004716
Ariel Elior6383c0b2011-07-14 08:31:57 +00004717 /*
 4718	 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004719 * path IGU SBs plus default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004720 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004721 msix_table_size = bp->igu_sb_cnt;
4722 if (IS_PF(bp))
4723 msix_table_size++;
4724 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004725
4726 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004727 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004728 bp->fp_array_size = fp_array_size;
4729 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004730
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004731 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004732 if (!fp)
4733 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004734 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004735 fp[i].tpa_info =
4736 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4737 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4738 if (!(fp[i].tpa_info))
4739 goto alloc_err;
4740 }
4741
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004742 bp->fp = fp;
4743
Barak Witkowski15192a82012-06-19 07:48:28 +00004744 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004745 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004746 GFP_KERNEL);
4747 if (!bp->sp_objs)
4748 goto alloc_err;
4749
4750 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004751 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004752 GFP_KERNEL);
4753 if (!bp->fp_stats)
4754 goto alloc_err;
4755
Merav Sicron65565882012-06-19 07:48:26 +00004756 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004757 txq_array_size =
4758 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4759 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4760
4761 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4762 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004763 if (!bp->bnx2x_txq)
4764 goto alloc_err;
4765
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004766 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004767 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004768 if (!tbl)
4769 goto alloc_err;
4770 bp->msix_table = tbl;
4771
4772 /* ilt */
4773 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4774 if (!ilt)
4775 goto alloc_err;
4776 bp->ilt = ilt;
4777
4778 return 0;
4779alloc_err:
4780 bnx2x_free_mem_bp(bp);
4781 return -ENOMEM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004782}
4783
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004784int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004785{
4786 struct bnx2x *bp = netdev_priv(dev);
4787
4788 if (unlikely(!netif_running(dev)))
4789 return 0;
4790
Yuval Mintz5d07d862012-09-13 02:56:21 +00004791 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004792 return bnx2x_nic_load(bp, LOAD_NORMAL);
4793}
4794
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004795int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4796{
4797 u32 sel_phy_idx = 0;
4798 if (bp->link_params.num_phys <= 1)
4799 return INT_PHY;
4800
4801 if (bp->link_vars.link_up) {
4802 sel_phy_idx = EXT_PHY1;
4803 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4804 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4805 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4806 sel_phy_idx = EXT_PHY2;
4807 } else {
4808
4809 switch (bnx2x_phy_selection(&bp->link_params)) {
4810 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4811 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4812 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4813 sel_phy_idx = EXT_PHY1;
4814 break;
4815 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4816 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4817 sel_phy_idx = EXT_PHY2;
4818 break;
4819 }
4820 }
4821
4822 return sel_phy_idx;
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004823}
4824int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4825{
4826 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4827 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004828	 * The selected activated PHY index is always taken after swapping (in case PHY
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004829 * swapping is enabled). So when swapping is enabled, we need to reverse
4830 * the configuration
4831 */
4832
4833 if (bp->link_params.multi_phy_config &
4834 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4835 if (sel_phy_idx == EXT_PHY1)
4836 sel_phy_idx = EXT_PHY2;
4837 else if (sel_phy_idx == EXT_PHY2)
4838 sel_phy_idx = EXT_PHY1;
4839 }
4840 return LINK_CONFIG_IDX(sel_phy_idx);
4841}
4842
Merav Sicron55c11942012-11-07 00:45:48 +00004843#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004844int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4845{
4846 struct bnx2x *bp = netdev_priv(dev);
4847 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4848
4849 switch (type) {
4850 case NETDEV_FCOE_WWNN:
4851 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4852 cp->fcoe_wwn_node_name_lo);
4853 break;
4854 case NETDEV_FCOE_WWPN:
4855 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4856 cp->fcoe_wwn_port_name_lo);
4857 break;
4858 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004859 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004860 return -EINVAL;
4861 }
4862
4863 return 0;
4864}
4865#endif
4866
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004867/* called with rtnl_lock */
4868int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4869{
4870 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004871
Yuval Mintz0650c0b2015-05-04 12:34:12 +03004872 if (pci_num_vf(bp->pdev)) {
4873 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4874 return -EPERM;
4875 }
4876
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004877 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004878 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004879 return -EAGAIN;
4880 }
4881
4882 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004883 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4884 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004885 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004886 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004887
4888 /* This does not race with packet allocation
4889 * because the actual alloc size is
4890 * only updated as part of load
4891 */
4892 dev->mtu = new_mtu;
4893
Yuval Mintz230d00e2015-07-22 09:16:25 +03004894 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4895 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4896
Michał Mirosław66371c42011-04-12 09:38:23 +00004897 return bnx2x_reload_if_running(dev);
4898}
4899
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004900netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004901 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004902{
4903 struct bnx2x *bp = netdev_priv(dev);
4904
Yuval Mintz909d9fa2015-04-22 12:47:32 +03004905 if (pci_num_vf(bp->pdev)) {
4906 netdev_features_t changed = dev->features ^ features;
4907
4908 /* Revert the requested changes in features if they
4909 * would require internal reload of PF in bnx2x_set_features().
4910 */
4911 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4912 features &= ~NETIF_F_RXCSUM;
4913 features |= dev->features & NETIF_F_RXCSUM;
4914 }
4915
4916 if (changed & NETIF_F_LOOPBACK) {
4917 features &= ~NETIF_F_LOOPBACK;
4918 features |= dev->features & NETIF_F_LOOPBACK;
4919 }
4920 }
4921
Michał Mirosław66371c42011-04-12 09:38:23 +00004922 /* TPA requires Rx CSUM offloading */
Dmitry Kravkovaebf6242014-08-25 17:48:32 +03004923 if (!(features & NETIF_F_RXCSUM)) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004924 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004925 features &= ~NETIF_F_GRO;
4926 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004927
4928 return features;
4929}
4930
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004931int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004932{
4933 struct bnx2x *bp = netdev_priv(dev);
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004934 netdev_features_t changes = features ^ dev->features;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004935 bool bnx2x_reload = false;
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004936 int rc;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004937
Yuval Mintz909d9fa2015-04-22 12:47:32 +03004938 /* VFs or non SRIOV PFs should be able to change loopback feature */
4939 if (!pci_num_vf(bp->pdev)) {
4940 if (features & NETIF_F_LOOPBACK) {
4941 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4942 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4943 bnx2x_reload = true;
4944 }
4945 } else {
4946 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4947 bp->link_params.loopback_mode = LOOPBACK_NONE;
4948 bnx2x_reload = true;
4949 }
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004950 }
4951 }
4952
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004953 /* if GRO is changed while LRO is enabled, don't force a reload */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004954 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4955 changes &= ~NETIF_F_GRO;
Eric Dumazet8802f572013-05-18 07:14:53 +00004956
Dmitry Kravkovaebf6242014-08-25 17:48:32 +03004957 /* if GRO is changed while HW TPA is off, don't force a reload */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004958 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4959 changes &= ~NETIF_F_GRO;
Dmitry Kravkovaebf6242014-08-25 17:48:32 +03004960
Eric Dumazet8802f572013-05-18 07:14:53 +00004961 if (changes)
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004962 bnx2x_reload = true;
Eric Dumazet8802f572013-05-18 07:14:53 +00004963
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004964 if (bnx2x_reload) {
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004965 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4966 dev->features = features;
4967 rc = bnx2x_reload_if_running(dev);
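			/* a positive return tells the networking core that
			 * dev->features was already updated by the driver
			 */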
4968 return rc ? rc : 1;
4969 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004970 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004971 }
4972
Michał Mirosław66371c42011-04-12 09:38:23 +00004973 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004974}
4975
4976void bnx2x_tx_timeout(struct net_device *dev)
4977{
4978 struct bnx2x *bp = netdev_priv(dev);
4979
4980#ifdef BNX2X_STOP_ON_ERROR
4981 if (!bp->panic)
4982 bnx2x_panic();
4983#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004984
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004985	/* This allows the netif to be shut down gracefully before resetting */
Yuval Mintz230bb0f2014-02-12 18:19:56 +02004986 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004987}
4988
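/* Legacy PCI power-management suspend hook: under rtnl, detach the netdev,
 * unload the NIC and drop to the PCI power state chosen by the PCI core.
 * bnx2x_resume() below performs the reverse sequence.
 */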
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004989int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4990{
4991 struct net_device *dev = pci_get_drvdata(pdev);
4992 struct bnx2x *bp;
4993
4994 if (!dev) {
4995 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4996 return -ENODEV;
4997 }
4998 bp = netdev_priv(dev);
4999
5000 rtnl_lock();
5001
5002 pci_save_state(pdev);
5003
5004 if (!netif_running(dev)) {
5005 rtnl_unlock();
5006 return 0;
5007 }
5008
5009 netif_device_detach(dev);
5010
Yuval Mintz5d07d862012-09-13 02:56:21 +00005011 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005012
5013 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5014
5015 rtnl_unlock();
5016
5017 return 0;
5018}
5019
5020int bnx2x_resume(struct pci_dev *pdev)
5021{
5022 struct net_device *dev = pci_get_drvdata(pdev);
5023 struct bnx2x *bp;
5024 int rc;
5025
5026 if (!dev) {
5027 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5028 return -ENODEV;
5029 }
5030 bp = netdev_priv(dev);
5031
5032 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00005033 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005034 return -EAGAIN;
5035 }
5036
5037 rtnl_lock();
5038
5039 pci_restore_state(pdev);
5040
5041 if (!netif_running(dev)) {
5042 rtnl_unlock();
5043 return 0;
5044 }
5045
5046 bnx2x_set_power_state(bp, PCI_D0);
5047 netif_device_attach(dev);
5048
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005049 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5050
5051 rtnl_unlock();
5052
5053 return rc;
5054}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005055
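/* Seed the CDU validation bytes in the ustorm and xstorm sections of an eth
 * connection context, letting the HW CDU (context distribution unit) verify
 * that the context it fetches really belongs to the given CID.
 */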
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005056void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5057 u32 cid)
5058{
Ariel Eliorb9871bc2013-09-04 14:09:21 +03005059 if (!cxt) {
5060 BNX2X_ERR("bad context pointer %p\n", cxt);
5061 return;
5062 }
5063
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005064 /* ustorm cxt validation */
5065 cxt->ustorm_ag_context.cdu_usage =
5066 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5067 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5068 /* xcontext validation */
5069 cxt->xstorm_ag_context.cdu_reserved =
5070 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5071 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5072}
5073
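/* Write the per-index host-coalescing timeout (in ticks) into the status
 * block data kept in CSTORM internal memory.
 */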
Eric Dumazet1191cb82012-04-27 21:39:21 +00005074static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5075 u8 fw_sb_id, u8 sb_index,
5076 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005077{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005078 u32 addr = BAR_CSTRORM_INTMEM +
5079 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5080 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00005081 DP(NETIF_MSG_IFUP,
5082 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5083 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005084}
5085
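/* Read-modify-write the per-index flags byte in CSTORM internal memory to
 * set or clear HC_INDEX_DATA_HC_ENABLED, i.e. to disable or re-enable host
 * coalescing for that status-block index.
 */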
Eric Dumazet1191cb82012-04-27 21:39:21 +00005086static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5087 u16 fw_sb_id, u8 sb_index,
5088 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005089{
5090 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5091 u32 addr = BAR_CSTRORM_INTMEM +
5092 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
Ariel Elior0c14e5c2013-04-17 22:49:06 +00005093 u8 flags = REG_RD8(bp, addr);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005094 /* clear and set */
5095 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5096 flags |= enable_flag;
Ariel Elior0c14e5c2013-04-17 22:49:06 +00005097 REG_WR8(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00005098 DP(NETIF_MSG_IFUP,
5099 "port %x fw_sb_id %d sb_index %d disable %d\n",
5100 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005101}
5102
5103void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5104 u8 sb_index, u8 disable, u16 usec)
5105{
5106 int port = BP_PORT(bp);
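	/* convert the requested interval from usec to HW timer ticks
	 * (BNX2X_BTR usec per tick)
	 */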
5107 u8 ticks = usec / BNX2X_BTR;
5108
5109 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5110
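	/* an explicit disable request or a 0 usec interval both turn
	 * coalescing off for this index
	 */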
5111 disable = disable ? 1 : (usec ? 0 : 1);
5112 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5113}
Yuval Mintz230bb0f2014-02-12 18:19:56 +02005114
5115void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5116 u32 verbose)
5117{
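	/* set_bit() is not a memory barrier on its own; order the flag update
	 * against surrounding memory accesses before the sp_rtnl task is
	 * scheduled to consume it.
	 */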
Peter Zijlstra4e857c52014-03-17 18:06:10 +01005118 smp_mb__before_atomic();
Yuval Mintz230bb0f2014-02-12 18:19:56 +02005119 set_bit(flag, &bp->sp_rtnl_state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01005120 smp_mb__after_atomic();
Yuval Mintz230bb0f2014-02-12 18:19:56 +02005121 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5122 flag);
5123 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5124}