/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpy()ing the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

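	/* Illustration (hypothetical numbers, not from any real config):
	 * with 8 eth queues, max_cos = 3 and a move from index 7 to 5,
	 * old_max_eth_txqs = 8 * 3 = 24 while new_max_eth_txqs =
	 * (8 - 7 + 5) * 3 = 18, i.e. the FCoE txdata slot shifts back by
	 * max_cos * delta = 6 entries.
	 */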
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
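	/* Illustration (hypothetical numbers): with old_eth_num = 8,
	 * delta = 2 and max_cos = 3, the cos-1 txdata entries move from
	 * slots 8..13 down to slots 6..11; new_idx = cos * 6 + i packs the
	 * surviving queues contiguously.
	 */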
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
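/* A transmitted packet occupies a chain of BDs: a start BD, one parse BD
 * (plus a second parse BD when BNX2X_HAS_SECOND_PBD is set), an optional
 * split-header BD for TSO (BNX2X_TSO_SPLIT_BD), and one BD per fragment;
 * nbd in the start BD counts them. This sketch of the layout is inferred
 * from the unwind logic below, not from hardware documentation.
 */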
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

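/* Reclaim descriptors up to the hardware consumer index, feed the
 * byte/packet counts to BQL via netdev_tx_completed_queue(), and re-wake
 * the queue once enough ring space has been freed.
 */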
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

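/* The SGE mask below is a bit vector with one bit per SGE ring entry,
 * stored in 64-bit elements: bits are cleared as the CQE's SGL consumes
 * pages, and whole elements are set back as the producer advances past
 * them. (Overview inferred from the BIT_VEC64 usage that follows.)
 */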
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
				    RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
				  le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

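/* TPA flow, as implemented below: a START CQE moves an aggregation queue
 * from BNX2X_TPA_STOP to BNX2X_TPA_START (or BNX2X_TPA_ERROR when the
 * replacement buffer cannot be mapped); the matching END CQE is handled
 * in bnx2x_tpa_stop(), which assembles and delivers the aggregated skb.
 */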
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 * nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: number of segments coalesced by the firmware
 *
 * Approximates the MSS for this aggregation using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it is
	 * always 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
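
	/* Worked example (assuming a standard 1500-byte MTU): for IPv4 with
	 * timestamps, hdrs_len = ETH_HLEN (14) + 20 + 20 + 12 = 66, so a
	 * 1514-byte first packet yields gso_size = 1514 - 66 = 1448.
	 */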

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page) {
		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

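	/* Advance within the pooled page: if another SGE-sized chunk still
	 * fits, take an extra page reference so the pool keeps the page
	 * alive alongside the ring's user; otherwise forget it so the next
	 * call allocates a fresh one.
	 */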
	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
		get_page(pool->page);
	else
		pool->page = NULL;
	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that "next" element will consume 2 indices)
		 */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
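/* tcp_gro_complete() expects th->check to hold the complemented
 * pseudo-header checksum of the aggregated frame; the helpers below
 * recompute it from the IP addresses and the new total length.
 */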
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_reset_network_header(skb);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

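/* Main RX completion loop: walk the completion queue, dispatching each
 * CQE to the slowpath handler, the TPA start/stop paths or the regular
 * single-buffer path, then publish the new BD/SGE/CQ producers to the
 * chip via bnx2x_update_rx_prod().
 */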
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring,
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001165/* calculates MF speed according to current linespeed and MF configuration */
1166u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1167{
1168 u16 line_speed = bp->link_vars.line_speed;
1169 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001170 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1171 bp->mf_config[BP_VN(bp)]);
1172
1173 /* Calculate the current MAX line speed limit for the MF
1174 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001175 */
Yuval Mintzda3cc2d2015-08-17 08:28:25 +03001176 if (IS_MF_PERCENT_BW(bp))
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001177 line_speed = (line_speed * maxCfg) / 100;
1178 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001179 u16 vn_max_rate = maxCfg * 100;
1180
1181 if (vn_max_rate < line_speed)
1182 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001183 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001184 }
1185
1186 return line_speed;
1187}
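/* Illustrative sketch of the MF speed math above; the values are
 * hypothetical, not taken from any real configuration:
 *
 *	line_speed = 10000			(10G physical link)
 *
 *	percent-BW mode, maxCfg = 25:
 *		10000 * 25 / 100 = 2500 Mbps
 *
 *	SD mode, maxCfg = 40 (units of 100 Mbps):
 *		vn_max_rate = 4000; min(10000, 4000) = 4000 Mbps
 */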
1188
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001189/**
1190 * bnx2x_fill_report_data - fill link report data to report
1191 *
1192 * @bp: driver handle
1193 * @data: link state to update
1194 *
 1195 * It uses non-atomic bit operations because it is called under the mutex.
1196 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001197static void bnx2x_fill_report_data(struct bnx2x *bp,
1198 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001199{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001200 memset(data, 0, sizeof(*data));
1201
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001202 if (IS_PF(bp)) {
1203 /* Fill the report data: effective line speed */
1204 data->line_speed = bnx2x_get_mf_speed(bp);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001205
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001206 /* Link is down */
1207 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1208 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1209 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001210
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001211 if (!BNX2X_NUM_ETH_QUEUES(bp))
1212 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1213 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001214
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001215 /* Full DUPLEX */
1216 if (bp->link_vars.duplex == DUPLEX_FULL)
1217 __set_bit(BNX2X_LINK_REPORT_FD,
1218 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001219
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001220 /* Rx Flow Control is ON */
1221 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1222 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1223 &data->link_report_flags);
1224
1225 /* Tx Flow Control is ON */
1226 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1227 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1228 &data->link_report_flags);
1229 } else { /* VF */
1230 *data = bp->vf_link_vars;
1231 }
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001232}
1233
1234/**
1235 * bnx2x_link_report - report link status to OS.
1236 *
1237 * @bp: driver handle
1238 *
 1239 * Calls __bnx2x_link_report() under the same locking scheme
 1240 * as the link/PHY state management code to ensure consistent
 1241 * link reporting.
1242 */
1243
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001244void bnx2x_link_report(struct bnx2x *bp)
1245{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001246 bnx2x_acquire_phy_lock(bp);
1247 __bnx2x_link_report(bp);
1248 bnx2x_release_phy_lock(bp);
1249}
1250
1251/**
1252 * __bnx2x_link_report - report link status to OS.
1253 *
1254 * @bp: driver handle
1255 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001256 * Non-atomic implementation.
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001257 * Should be called under the phy_lock.
1258 */
1259void __bnx2x_link_report(struct bnx2x *bp)
1260{
1261 struct bnx2x_link_report_data cur_data;
1262
1263 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001264 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001265 bnx2x_read_mf_cfg(bp);
1266
1267 /* Read the current link report info */
1268 bnx2x_fill_report_data(bp, &cur_data);
1269
1270 /* Don't report link down or exactly the same link status twice */
1271 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1272 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1273 &bp->last_reported_link.link_report_flags) &&
1274 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1275 &cur_data.link_report_flags)))
1276 return;
1277
1278 bp->link_cnt++;
1279
 1280	/* We are about to report new link parameters now -
 1281	 * remember the current data for next time.
1282 */
1283 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1284
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001285 /* propagate status to VFs */
1286 if (IS_PF(bp))
1287 bnx2x_iov_link_update(bp);
1288
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001289 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001291 netif_carrier_off(bp->dev);
1292 netdev_err(bp->dev, "NIC Link is Down\n");
1293 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001294 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001295 const char *duplex;
1296 const char *flow;
1297
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001298 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001299
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001300 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1301 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001302 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001303 else
Joe Perches94f05b02011-08-14 12:16:20 +00001304 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001305
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001306		/* Handle flow control at the end so that only the FC flags
 1307		 * can possibly be set; this way we can easily check whether
 1308		 * FC is enabled at all.
1309 */
1310 if (cur_data.link_report_flags) {
1311 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1312 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001313 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1314 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001315 flow = "ON - receive & transmit";
1316 else
1317 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001318 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001319 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001320 }
Joe Perches94f05b02011-08-14 12:16:20 +00001321 } else {
1322 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001323 }
Joe Perches94f05b02011-08-14 12:16:20 +00001324 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1325 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001326 }
1327}
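/* The deduplication rule above, as a minimal sketch (illustrative
 * pseudo-code, not driver code): report only when the state actually
 * changed, and never report "link down" twice in a row.
 *
 *	if (new == last || (down(new) && down(last)))
 *		return;			(nothing new to tell the OS)
 *	last = new;			(remember what was reported)
 */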
1328
Eric Dumazet1191cb82012-04-27 21:39:21 +00001329static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1330{
1331 int i;
1332
1333 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1334 struct eth_rx_sge *sge;
1335
1336 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1337 sge->addr_hi =
1338 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1339 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1340
1341 sge->addr_lo =
1342 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1343 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1344 }
1345}
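/* Descriptive note on the indexing above: the SGE ring consists of
 * NUM_RX_SGE_PAGES pages of RX_SGE_CNT elements each, with the tail of
 * every page reserved for a "next page" pointer instead of data (which
 * is why the allocation loop in bnx2x_init_rx_rings iterates over
 * MAX_RX_SGE_CNT elements per page). rx_sge_ring[RX_SGE_CNT * i - 2]
 * is that reserved slot of page i-1, and the (i % NUM_RX_SGE_PAGES)
 * arithmetic makes the last page point back to page 0, closing the
 * ring.
 */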
1346
1347static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1348 struct bnx2x_fastpath *fp, int last)
1349{
1350 int i;
1351
1352 for (i = 0; i < last; i++) {
1353 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1354 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1355 u8 *data = first_buf->data;
1356
1357 if (data == NULL) {
1358 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1359 continue;
1360 }
1361 if (tpa_info->tpa_state == BNX2X_TPA_START)
1362 dma_unmap_single(&bp->pdev->dev,
1363 dma_unmap_addr(first_buf, mapping),
1364 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001365 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001366 first_buf->data = NULL;
1367 }
1368}
1369
Merav Sicron55c11942012-11-07 00:45:48 +00001370void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1371{
1372 int j;
1373
1374 for_each_rx_queue_cnic(bp, j) {
1375 struct bnx2x_fastpath *fp = &bp->fp[j];
1376
1377 fp->rx_bd_cons = 0;
1378
1379 /* Activate BD ring */
1380 /* Warning!
 1381		 * this will generate an interrupt (to the TSTORM);
 1382		 * it must only be done after the chip is initialized
1383 */
1384 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1385 fp->rx_sge_prod);
1386 }
1387}
1388
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001389void bnx2x_init_rx_rings(struct bnx2x *bp)
1390{
1391 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001392 u16 ring_prod;
1393 int i, j;
1394
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001395 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001396 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001397 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001398
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001399 DP(NETIF_MSG_IFUP,
1400 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1401
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001402 if (fp->mode != TPA_MODE_DISABLED) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001403 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001404 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001405 struct bnx2x_agg_info *tpa_info =
1406 &fp->tpa_info[i];
1407 struct sw_rx_bd *first_buf =
1408 &tpa_info->first_buf;
1409
Michal Schmidt996dedb2013-09-05 22:13:09 +02001410 first_buf->data =
1411 bnx2x_frag_alloc(fp, GFP_KERNEL);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001412 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001413 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1414 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001415 bnx2x_free_tpa_pool(bp, fp, i);
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001416 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001417 break;
1418 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001419 dma_unmap_addr_set(first_buf, mapping, 0);
1420 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001421 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001422
1423 /* "next page" elements initialization */
1424 bnx2x_set_next_page_sgl(fp);
1425
1426 /* set SGEs bit mask */
1427 bnx2x_init_sge_ring_bit_mask(fp);
1428
1429 /* Allocate SGEs and initialize the ring elements */
1430 for (i = 0, ring_prod = 0;
1431 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1432
Michal Schmidt996dedb2013-09-05 22:13:09 +02001433 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1434 GFP_KERNEL) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001435 BNX2X_ERR("was only able to allocate %d rx sges\n",
1436 i);
1437 BNX2X_ERR("disabling TPA for queue[%d]\n",
1438 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001439 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001440 bnx2x_free_rx_sge_range(bp, fp,
1441 ring_prod);
1442 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001443 MAX_AGG_QS(bp));
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001444 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001445 ring_prod = 0;
1446 break;
1447 }
1448 ring_prod = NEXT_SGE_IDX(ring_prod);
1449 }
1450
1451 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001452 }
1453 }
1454
Merav Sicron55c11942012-11-07 00:45:48 +00001455 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001456 struct bnx2x_fastpath *fp = &bp->fp[j];
1457
1458 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001459
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001460 /* Activate BD ring */
1461 /* Warning!
 1462		 * this will generate an interrupt (to the TSTORM);
 1463		 * it must only be done after the chip is initialized
1464 */
1465 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1466 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001467
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001468 if (j != 0)
1469 continue;
1470
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001471 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001472 REG_WR(bp, BAR_USTRORM_INTMEM +
1473 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1474 U64_LO(fp->rx_comp_mapping));
1475 REG_WR(bp, BAR_USTRORM_INTMEM +
1476 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1477 U64_HI(fp->rx_comp_mapping));
1478 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001479 }
1480}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001481
Merav Sicron55c11942012-11-07 00:45:48 +00001482static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1483{
1484 u8 cos;
1485 struct bnx2x *bp = fp->bp;
1486
1487 for_each_cos_in_tx_queue(fp, cos) {
1488 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1489 unsigned pkts_compl = 0, bytes_compl = 0;
1490
1491 u16 sw_prod = txdata->tx_pkt_prod;
1492 u16 sw_cons = txdata->tx_pkt_cons;
1493
1494 while (sw_cons != sw_prod) {
1495 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1496 &pkts_compl, &bytes_compl);
1497 sw_cons++;
1498 }
1499
1500 netdev_tx_reset_queue(
1501 netdev_get_tx_queue(bp->dev,
1502 txdata->txq_index));
1503 }
1504}
1505
1506static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1507{
1508 int i;
1509
1510 for_each_tx_queue_cnic(bp, i) {
1511 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1512 }
1513}
1514
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001515static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1516{
1517 int i;
1518
Merav Sicron55c11942012-11-07 00:45:48 +00001519 for_each_eth_queue(bp, i) {
1520 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001521 }
1522}
1523
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001524static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1525{
1526 struct bnx2x *bp = fp->bp;
1527 int i;
1528
1529 /* ring wasn't allocated */
1530 if (fp->rx_buf_ring == NULL)
1531 return;
1532
1533 for (i = 0; i < NUM_RX_BD; i++) {
1534 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001535 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001536
Eric Dumazete52fcb22011-11-14 06:05:34 +00001537 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001538 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001539 dma_unmap_single(&bp->pdev->dev,
1540 dma_unmap_addr(rx_buf, mapping),
1541 fp->rx_buf_size, DMA_FROM_DEVICE);
1542
Eric Dumazete52fcb22011-11-14 06:05:34 +00001543 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001544 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001545 }
1546}
1547
Merav Sicron55c11942012-11-07 00:45:48 +00001548static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1549{
1550 int j;
1551
1552 for_each_rx_queue_cnic(bp, j) {
1553 bnx2x_free_rx_bds(&bp->fp[j]);
1554 }
1555}
1556
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001557static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1558{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001559 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001560
Merav Sicron55c11942012-11-07 00:45:48 +00001561 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001562 struct bnx2x_fastpath *fp = &bp->fp[j];
1563
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001564 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001565
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001566 if (fp->mode != TPA_MODE_DISABLED)
David S. Miller8decf862011-09-22 03:23:13 -04001567 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001568 }
1569}
1570
stephen hemmingera8f47eb2014-01-09 22:20:11 -08001571static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00001572{
1573 bnx2x_free_tx_skbs_cnic(bp);
1574 bnx2x_free_rx_skbs_cnic(bp);
1575}
1576
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001577void bnx2x_free_skbs(struct bnx2x *bp)
1578{
1579 bnx2x_free_tx_skbs(bp);
1580 bnx2x_free_rx_skbs(bp);
1581}
1582
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001583void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1584{
1585 /* load old values */
1586 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1587
1588 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1589 /* leave all but MAX value */
1590 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1591
1592 /* set new MAX value */
1593 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1594 & FUNC_MF_CFG_MAX_BW_MASK;
1595
1596 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1597 }
1598}
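/* The function above is a plain read-modify-write of a packed bitfield.
 * As a generic sketch (the field layout here is hypothetical):
 *
 *	cfg &= ~FIELD_MASK;
 *	cfg |= (value << FIELD_SHIFT) & FIELD_MASK;
 *
 * e.g. a 16-bit field at bits 31:16 with value 0x2800 turns a cfg of
 * 0x0000abcd into 0x2800abcd.
 */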
1599
Dmitry Kravkovca924292011-06-14 01:33:08 +00001600/**
1601 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1602 *
1603 * @bp: driver handle
1604 * @nvecs: number of vectors to be released
1605 */
1606static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001607{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001608 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001609
Dmitry Kravkovca924292011-06-14 01:33:08 +00001610 if (nvecs == offset)
1611 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001612
1613 /* VFs don't have a default SB */
1614 if (IS_PF(bp)) {
1615 free_irq(bp->msix_table[offset].vector, bp->dev);
1616 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1617 bp->msix_table[offset].vector);
1618 offset++;
1619 }
Merav Sicron55c11942012-11-07 00:45:48 +00001620
1621 if (CNIC_SUPPORT(bp)) {
1622 if (nvecs == offset)
1623 return;
1624 offset++;
1625 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001626
Dmitry Kravkovca924292011-06-14 01:33:08 +00001627 for_each_eth_queue(bp, i) {
1628 if (nvecs == offset)
1629 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001630 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1631 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001632
1633 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001634 }
1635}
1636
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001637void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001638{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001639 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001640 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1641 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1642
1643 /* vfs don't have a default status block */
1644 if (IS_PF(bp))
1645 nvecs++;
1646
1647 bnx2x_free_msix_irqs(bp, nvecs);
1648 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001649 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001650 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001651}
1652
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001653int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001654{
Ariel Elior1ab44342013-01-01 05:22:23 +00001655 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001656
Ariel Elior1ab44342013-01-01 05:22:23 +00001657 /* VFs don't have a default status block */
1658 if (IS_PF(bp)) {
1659 bp->msix_table[msix_vec].entry = msix_vec;
1660 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1661 bp->msix_table[0].entry);
1662 msix_vec++;
1663 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001664
Merav Sicron55c11942012-11-07 00:45:48 +00001665	/* CNIC requires an MSI-X vector for itself */
1666 if (CNIC_SUPPORT(bp)) {
1667 bp->msix_table[msix_vec].entry = msix_vec;
1668 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1669 msix_vec, bp->msix_table[msix_vec].entry);
1670 msix_vec++;
1671 }
1672
Ariel Elior6383c0b2011-07-14 08:31:57 +00001673 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001674 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001675 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001676 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1677 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001678 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001679 }
1680
Ariel Elior1ab44342013-01-01 05:22:23 +00001681 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1682 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001683
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001684 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1685 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001686 /*
1687 * reconfigure number of tx/rx queues according to available
1688 * MSI-X vectors
1689 */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001690 if (rc == -ENOSPC) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001691 /* Get by with single vector */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001692 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1693 if (rc < 0) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001694 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1695 rc);
1696 goto no_msix;
1697 }
1698
1699 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1700 bp->flags |= USING_SINGLE_MSIX_FLAG;
1701
Merav Sicron55c11942012-11-07 00:45:48 +00001702 BNX2X_DEV_INFO("set number of queues to 1\n");
1703 bp->num_ethernet_queues = 1;
1704 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001705 } else if (rc < 0) {
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001706 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001707 goto no_msix;
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001708 } else if (rc < msix_vec) {
 1709		/* how many fewer vectors will we have? */
1710 int diff = msix_vec - rc;
1711
1712 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1713
1714 /*
1715 * decrease number of queues by number of unallocated entries
1716 */
1717 bp->num_ethernet_queues -= diff;
1718 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1719
1720 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1721 bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001722 }
1723
1724 bp->flags |= USING_MSIX_FLAG;
1725
1726 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001727
1728no_msix:
 1729	/* fall back to INTx if not enough memory */
1730 if (rc == -ENOMEM)
1731 bp->flags |= DISABLE_MSI_FLAG;
1732
1733 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001734}
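/* Resulting msix_table layout, as implied by the code above (the
 * slowpath and CNIC slots are conditional):
 *
 *	[0]		slowpath / default SB	(PF only)
 *	[1]		CNIC			(if CNIC_SUPPORT)
 *	[2..2+N-1]	ETH fastpath queues 0..N-1
 *
 * When pci_enable_msix_range() grants fewer than msix_vec vectors, the
 * shortfall is taken out of the ETH queue count, never out of the
 * fixed slots.
 */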
1735
1736static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1737{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001738 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001739
Ariel Eliorad5afc82013-01-01 05:22:26 +00001740 /* no default status block for vf */
1741 if (IS_PF(bp)) {
1742 rc = request_irq(bp->msix_table[offset++].vector,
1743 bnx2x_msix_sp_int, 0,
1744 bp->dev->name, bp->dev);
1745 if (rc) {
1746 BNX2X_ERR("request sp irq failed\n");
1747 return -EBUSY;
1748 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001749 }
1750
Merav Sicron55c11942012-11-07 00:45:48 +00001751 if (CNIC_SUPPORT(bp))
1752 offset++;
1753
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001754 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001755 struct bnx2x_fastpath *fp = &bp->fp[i];
1756 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1757 bp->dev->name, i);
1758
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001759 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001760 bnx2x_msix_fp_int, 0, fp->name, fp);
1761 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001762 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1763 bp->msix_table[offset].vector, rc);
1764 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001765 return -EBUSY;
1766 }
1767
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001768 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001769 }
1770
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001771 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001772 if (IS_PF(bp)) {
1773 offset = 1 + CNIC_SUPPORT(bp);
1774 netdev_info(bp->dev,
1775 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1776 bp->msix_table[0].vector,
1777 0, bp->msix_table[offset].vector,
1778 i - 1, bp->msix_table[offset + i - 1].vector);
1779 } else {
1780 offset = CNIC_SUPPORT(bp);
1781 netdev_info(bp->dev,
1782 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1783 0, bp->msix_table[offset].vector,
1784 i - 1, bp->msix_table[offset + i - 1].vector);
1785 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001786 return 0;
1787}
1788
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001789int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001790{
1791 int rc;
1792
1793 rc = pci_enable_msi(bp->pdev);
1794 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001795 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001796 return -1;
1797 }
1798 bp->flags |= USING_MSI_FLAG;
1799
1800 return 0;
1801}
1802
1803static int bnx2x_req_irq(struct bnx2x *bp)
1804{
1805 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001806 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001807
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001808 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001809 flags = 0;
1810 else
1811 flags = IRQF_SHARED;
1812
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001813 if (bp->flags & USING_MSIX_FLAG)
1814 irq = bp->msix_table[0].vector;
1815 else
1816 irq = bp->pdev->irq;
1817
1818 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001819}
1820
Yuval Mintzc957d092013-06-25 08:50:11 +03001821static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001822{
1823 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001824 if (bp->flags & USING_MSIX_FLAG &&
1825 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001826 rc = bnx2x_req_msix_irqs(bp);
1827 if (rc)
1828 return rc;
1829 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001830 rc = bnx2x_req_irq(bp);
1831 if (rc) {
1832 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1833 return rc;
1834 }
1835 if (bp->flags & USING_MSI_FLAG) {
1836 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001837 netdev_info(bp->dev, "using MSI IRQ %d\n",
1838 bp->dev->irq);
1839 }
1840 if (bp->flags & USING_MSIX_FLAG) {
1841 bp->dev->irq = bp->msix_table[0].vector;
1842 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1843 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001844 }
1845 }
1846
1847 return 0;
1848}
1849
Merav Sicron55c11942012-11-07 00:45:48 +00001850static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1851{
1852 int i;
1853
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001854 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001855 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001856 }
Merav Sicron55c11942012-11-07 00:45:48 +00001857}
1858
Eric Dumazet1191cb82012-04-27 21:39:21 +00001859static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001860{
1861 int i;
1862
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001863 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001864 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001865 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001866}
1867
Merav Sicron55c11942012-11-07 00:45:48 +00001868static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1869{
1870 int i;
1871
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001872 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001873 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001874 }
Merav Sicron55c11942012-11-07 00:45:48 +00001875}
1876
Eric Dumazet1191cb82012-04-27 21:39:21 +00001877static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001878{
1879 int i;
1880
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001881 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001882 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001883 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001884}
1885
1886void bnx2x_netif_start(struct bnx2x *bp)
1887{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001888 if (netif_running(bp->dev)) {
1889 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001890 if (CNIC_LOADED(bp))
1891 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001892 bnx2x_int_enable(bp);
1893 if (bp->state == BNX2X_STATE_OPEN)
1894 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001895 }
1896}
1897
1898void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1899{
1900 bnx2x_int_disable_sync(bp, disable_hw);
1901 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001902 if (CNIC_LOADED(bp))
1903 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001904}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001905
Jason Wangf663dd92014-01-10 16:18:26 +08001906u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001907 void *accel_priv, select_queue_fallback_t fallback)
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001908{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001909 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001910
Merav Sicron55c11942012-11-07 00:45:48 +00001911 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001912 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1913 u16 ether_type = ntohs(hdr->h_proto);
1914
1915 /* Skip VLAN tag if present */
1916 if (ether_type == ETH_P_8021Q) {
1917 struct vlan_ethhdr *vhdr =
1918 (struct vlan_ethhdr *)skb->data;
1919
1920 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1921 }
1922
1923 /* If ethertype is FCoE or FIP - use FCoE ring */
1924 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001925 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001926 }
Merav Sicron55c11942012-11-07 00:45:48 +00001927
David S. Miller823dcd22011-08-20 10:39:12 -07001928 /* select a non-FCoE queue */
Mintz, Yuval3968d382017-06-01 15:57:56 +03001929 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001930}
1931
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001932void bnx2x_set_num_queues(struct bnx2x *bp)
1933{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001934 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001935 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001936
Barak Witkowskia3348722012-04-23 03:04:46 +00001937 /* override in STORAGE SD modes */
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03001938 if (IS_MF_STORAGE_ONLY(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001939 bp->num_ethernet_queues = 1;
1940
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001941 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001942 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1943 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001944
1945 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001946}
1947
David S. Miller823dcd22011-08-20 10:39:12 -07001948/**
1949 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1950 *
1951 * @bp: Driver handle
1952 *
 1953 * We currently support at most 16 Tx queues for each CoS, thus we will
 1954 * allocate a multiple of 16 for ETH L2 rings, according to the value of
 1955 * bp->max_cos.
1956 *
1957 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1958 * index after all ETH L2 indices.
1959 *
1960 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1961 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001962 * 16..31,...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001963 *
1964 * The proper configuration of skb->queue_mapping is handled by
1965 * bnx2x_select_queue() and __skb_tx_hash().
1966 *
1967 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1968 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1969 */
Merav Sicron55c11942012-11-07 00:45:48 +00001970static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001971{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001972 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001973
Merav Sicron65565882012-06-19 07:48:26 +00001974 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001975 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001976
1977/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001978 if (include_cnic && !NO_FCOE(bp)) {
1979 rx++;
1980 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001981 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001982
1983 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1984 if (rc) {
1985 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1986 return rc;
1987 }
1988 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1989 if (rc) {
1990 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1991 return rc;
1992 }
1993
Merav Sicron51c1a582012-03-18 10:33:38 +00001994 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001995 tx, rx);
1996
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001997 return rc;
1998}
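/* Worked example for the counts above (hypothetical configuration):
 * with 8 ETH queues, max_cos = 3 and an FCoE ring included,
 *
 *	tx = 8 * 3 + 1 = 25
 *	rx = 8 + 1 = 9
 *
 * with the FCoE Tx ring taking the index right after the ETH L2
 * indices, as described in the comment block above.
 */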
1999
Eric Dumazet1191cb82012-04-27 21:39:21 +00002000static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002001{
2002 int i;
2003
2004 for_each_queue(bp, i) {
2005 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00002006 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002007
2008 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2009 if (IS_FCOE_IDX(i))
2010 /*
 2011			 * Although no IP frames are expected to arrive on
 2012			 * this ring, we still want to add an
2013 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2014 * overrun attack.
2015 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00002016 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002017 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00002018 mtu = bp->dev->mtu;
2019 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2020 IP_HEADER_ALIGNMENT_PADDING +
Jarod Wilsone1c6dcc2016-10-17 15:54:04 -04002021 ETH_OVERHEAD +
Eric Dumazete52fcb22011-11-14 06:05:34 +00002022 mtu +
2023 BNX2X_FW_RX_ALIGN_END;
Scott Wood9b70de62017-04-28 19:17:41 -05002024 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002025 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00002026 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2027 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2028 else
2029 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002030 }
2031}
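/* Rough arithmetic behind rx_buf_size, as a sketch (the alignment and
 * overhead constants below are illustrative assumptions, not the
 * actual header values):
 *
 *	mtu = 1500, align start = 64, pad + ETH_OVERHEAD ~ 26, end = 0
 *	rx_buf_size ~ SKB_DATA_ALIGN(64 + 26 + 1500) ~ 1600
 *
 * Since 1600 + NET_SKB_PAD fits within a 4K page, rx_frag_size is set
 * and the cheaper page-fragment allocator is used for this queue.
 */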
2032
Ariel Elior60cad4e2013-09-04 14:09:22 +03002033static int bnx2x_init_rss(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002034{
2035 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002036 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2037
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002038 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002039 * enabled
2040 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002041 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2042 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00002043 bp->fp->cl_id +
2044 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002045
2046 /*
2047 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2048 * per-port, so if explicit configuration is needed, do it only
2049 * for a PMF.
2050 *
2051 * For 57712 and newer on the other hand it's a per-function
2052 * configuration.
2053 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002054 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002055}
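/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the loop above
 * fills the indirection table with a round-robin of client IDs. With a
 * hypothetical cl_id of 10 and 4 ETH queues the table would read
 *
 *	10, 11, 12, 13, 10, 11, 12, 13, ...
 */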
2056
Ariel Elior60cad4e2013-09-04 14:09:22 +03002057int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2058 bool config_hash, bool enable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002059{
Yuval Mintz3b603062012-03-18 10:33:39 +00002060 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002061
2062 /* Although RSS is meaningless when there is a single HW queue we
2063 * still need it enabled in order to have HW Rx hash generated.
2064 *
2065 * if (!is_eth_multi(bp))
2066 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2067 */
2068
Dmitry Kravkov96305232012-04-03 18:41:30 +00002069 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002070
2071 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2072
Ariel Elior60cad4e2013-09-04 14:09:22 +03002073 if (enable) {
2074 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002075
Ariel Elior60cad4e2013-09-04 14:09:22 +03002076 /* RSS configuration */
2077 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2078 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2079 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2080 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2081 if (rss_obj->udp_rss_v4)
2082 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2083 if (rss_obj->udp_rss_v6)
2084 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Dmitry Kravkove42780b2014-08-17 16:47:43 +03002085
Yuval Mintz28311f82015-07-22 09:16:22 +03002086 if (!CHIP_IS_E1x(bp)) {
2087 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2088 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2089 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2090
Dmitry Kravkove42780b2014-08-17 16:47:43 +03002091 /* valid only for TUNN_MODE_GRE tunnel mode */
Yuval Mintz28311f82015-07-22 09:16:22 +03002092 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2093 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002094 } else {
2095 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2096 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002097
Dmitry Kravkov96305232012-04-03 18:41:30 +00002098 /* Hash bits */
2099 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002100
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002101 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002102
Dmitry Kravkov96305232012-04-03 18:41:30 +00002103 if (config_hash) {
2104 /* RSS keys */
Eric Dumazete3ec69c2014-11-16 06:23:07 -08002105 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
Dmitry Kravkov96305232012-04-03 18:41:30 +00002106 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002107 }
2108
Ariel Elior60cad4e2013-09-04 14:09:22 +03002109 if (IS_PF(bp))
2110 return bnx2x_config_rss(bp, &params);
2111 else
2112 return bnx2x_vfpf_config_rss(bp, &params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002113}
2114
Eric Dumazet1191cb82012-04-27 21:39:21 +00002115static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002116{
Yuval Mintz3b603062012-03-18 10:33:39 +00002117 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002118
2119 /* Prepare parameters for function state transitions */
2120 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2121
2122 func_params.f_obj = &bp->func_obj;
2123 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2124
2125 func_params.params.hw_init.load_phase = load_code;
2126
2127 return bnx2x_func_state_change(bp, &func_params);
2128}
2129
2130/*
 2131 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002132 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002133 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002134void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002135{
2136 int rc;
2137 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002138 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002139 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002140
2141 /***************** Cleanup MACs' object first *************************/
2142
 2143	/* Wait for completion of the requested commands */
2144 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2145 /* Perform a dry cleanup */
2146 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2147
2148 /* Clean ETH primary MAC */
2149 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002150 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002151 &ramrod_flags);
2152 if (rc != 0)
2153 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2154
2155 /* Cleanup UC list */
2156 vlan_mac_flags = 0;
2157 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2158 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2159 &ramrod_flags);
2160 if (rc != 0)
2161 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2162
2163 /***************** Now clean mcast object *****************************/
2164 rparam.mcast_obj = &bp->mcast_obj;
2165 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2166
Yuval Mintz8b09be52013-08-01 17:30:59 +03002167 /* Add a DEL command... - Since we're doing a driver cleanup only,
2168 * we take a lock surrounding both the initial send and the CONTs,
2169 * as we don't want a true completion to disrupt us in the middle.
2170 */
2171 netif_addr_lock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002172 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2173 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002174 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2175 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002176
2177 /* ...and wait until all pending commands are cleared */
2178 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2179 while (rc != 0) {
2180 if (rc < 0) {
2181 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2182 rc);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002183 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002184 return;
2185 }
2186
2187 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2188 }
Yuval Mintz8b09be52013-08-01 17:30:59 +03002189 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002190}
2191
2192#ifndef BNX2X_STOP_ON_ERROR
2193#define LOAD_ERROR_EXIT(bp, label) \
2194 do { \
2195 (bp)->state = BNX2X_STATE_ERROR; \
2196 goto label; \
2197 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002198
2199#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2200 do { \
2201 bp->cnic_loaded = false; \
2202 goto label; \
2203 } while (0)
2204#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002205#define LOAD_ERROR_EXIT(bp, label) \
2206 do { \
2207 (bp)->state = BNX2X_STATE_ERROR; \
2208 (bp)->panic = 1; \
2209 return -EBUSY; \
2210 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002211#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2212 do { \
2213 bp->cnic_loaded = false; \
2214 (bp)->panic = 1; \
2215 return -EBUSY; \
2216 } while (0)
2217#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002218
Ariel Eliorad5afc82013-01-01 05:22:26 +00002219static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002220{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002221 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2222 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2223 return;
2224}
Yuval Mintz452427b2012-03-26 20:47:07 +00002225
Ariel Eliorad5afc82013-01-01 05:22:26 +00002226static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2227{
Ariel Elior8db573b2013-01-01 05:22:37 +00002228 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002229 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002230
Ariel Eliorad5afc82013-01-01 05:22:26 +00002231 /* number of queues for statistics is number of eth queues + FCoE */
2232 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002233
Ariel Eliorad5afc82013-01-01 05:22:26 +00002234 /* Total number of FW statistics requests =
2235 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2236 * and fcoe l2 queue) stats + num of queues (which includes another 1
2237 * for fcoe l2 queue if applicable)
2238 */
2239 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2240
Ariel Elior8db573b2013-01-01 05:22:37 +00002241 /* vf stats appear in the request list, but their data is allocated by
2242 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2243 * it is used to determine where to place the vf stats queries in the
2244 * request struct
2245 */
2246 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002247 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002248
Ariel Eliorad5afc82013-01-01 05:22:26 +00002249 /* Request is built from stats_query_header and an array of
2250 * stats_query_cmd_group each of which contains
 2251 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2252 * configured in the stats_query_header.
2253 */
2254 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002255 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2256 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002257 1 : 0));
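	/* The open-coded round-up above is equivalent to
	 * DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
	 *		STATS_QUERY_CMD_COUNT).
	 */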
2258
Ariel Elior8db573b2013-01-01 05:22:37 +00002259 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2260 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002261 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2262 num_groups * sizeof(struct stats_query_cmd_group);
2263
2264 /* Data for statistics requests + stats_counter
2265 * stats_counter holds per-STORM counters that are incremented
2266 * when STORM has finished with the current request.
2267 * memory for FCoE offloaded statistics are counted anyway,
2268 * even if they will not be sent.
2269 * VF stats are not accounted for here as the data of VF stats is stored
2270 * in memory allocated by the VF, not here.
2271 */
2272 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2273 sizeof(struct per_pf_stats) +
2274 sizeof(struct fcoe_statistics_params) +
2275 sizeof(struct per_queue_stats) * num_queue_stats +
2276 sizeof(struct stats_counter);
2277
Joe Perchescd2b0382014-02-20 13:25:51 -08002278 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2279 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2280 if (!bp->fw_stats)
2281 goto alloc_mem_err;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002282
2283 /* Set shortcuts */
2284 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2285 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2286 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2287 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2288 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2289 bp->fw_stats_req_sz;
2290
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002291 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002292 U64_HI(bp->fw_stats_req_mapping),
2293 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002294 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002295 U64_HI(bp->fw_stats_data_mapping),
2296 U64_LO(bp->fw_stats_data_mapping));
2297 return 0;
2298
2299alloc_mem_err:
2300 bnx2x_free_fw_stats_mem(bp);
2301 BNX2X_ERR("Can't allocate FW stats memory\n");
2302 return -ENOMEM;
2303}
2304
2305/* send load request to mcp and analyze response */
2306static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2307{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002308 u32 param;
2309
Ariel Eliorad5afc82013-01-01 05:22:26 +00002310 /* init fw_seq */
2311 bp->fw_seq =
2312 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2313 DRV_MSG_SEQ_NUMBER_MASK);
2314 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2315
2316 /* Get current FW pulse sequence */
2317 bp->fw_drv_pulse_wr_seq =
2318 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2319 DRV_PULSE_SEQ_MASK);
2320 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2321
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002322 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2323
2324 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2325 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2326
Ariel Eliorad5afc82013-01-01 05:22:26 +00002327 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002328 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002329
2330 /* if mcp fails to respond we must abort */
2331 if (!(*load_code)) {
2332 BNX2X_ERR("MCP response failure, aborting\n");
2333 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002334 }
2335
Ariel Eliorad5afc82013-01-01 05:22:26 +00002336 /* If mcp refused (e.g. other port is in diagnostic mode) we
2337 * must abort
2338 */
2339 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2340 BNX2X_ERR("MCP refused load request, aborting\n");
2341 return -EBUSY;
2342 }
2343 return 0;
2344}
2345
2346/* check whether another PF has already loaded FW to chip. In
2347 * virtualized environments a pf from another VM may have already
2348 * initialized the device including loading FW
2349 */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002350int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002351{
2352 /* is another pf loaded on this engine? */
2353 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2354 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2355 /* build my FW version dword */
2356 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2357 (BCM_5710_FW_MINOR_VERSION << 8) +
2358 (BCM_5710_FW_REVISION_VERSION << 16) +
2359 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2360
2361 /* read loaded FW from chip */
2362 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2363
2364 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2365 loaded_fw, my_fw);
2366
2367 /* abort nic load if version mismatch */
2368 if (my_fw != loaded_fw) {
Yuval Mintz91ebb922013-12-26 09:57:07 +02002369 if (print_err)
2370 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2371 loaded_fw, my_fw);
2372 else
2373 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2374 loaded_fw, my_fw);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002375 return -EBUSY;
2376 }
2377 }
2378 return 0;
2379}
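/* The version dword above packs major/minor/revision/engineering into
 * successive bytes, lowest byte first. A hypothetical FW 7.8.19.0
 * packs as
 *
 *	7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807
 */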
2380
2381/* returns the "mcp load_code" according to global load_count array */
2382static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2383{
2384 int path = BP_PATH(bp);
2385
2386 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002387 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2388 bnx2x_load_count[path][2]);
2389 bnx2x_load_count[path][0]++;
2390 bnx2x_load_count[path][1 + port]++;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002391 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002392 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2393 bnx2x_load_count[path][2]);
2394 if (bnx2x_load_count[path][0] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002395 return FW_MSG_CODE_DRV_LOAD_COMMON;
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002396 else if (bnx2x_load_count[path][1 + port] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002397 return FW_MSG_CODE_DRV_LOAD_PORT;
2398 else
2399 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2400}
2401
2402/* mark PMF if applicable */
2403static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2404{
2405 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2406 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2407 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2408 bp->port.pmf = 1;
2409 /* We need the barrier to ensure the ordering between the
2410 * writing to bp->port.pmf here and reading it from the
2411 * bnx2x_periodic_task().
2412 */
2413 smp_mb();
2414 } else {
2415 bp->port.pmf = 0;
2416 }
2417
2418 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2419}
2420
2421static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2422{
2423 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2424 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2425 (bp->common.shmem2_base)) {
2426 if (SHMEM2_HAS(bp, dcc_support))
2427 SHMEM2_WR(bp, dcc_support,
2428 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2429 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2430 if (SHMEM2_HAS(bp, afex_driver_support))
2431 SHMEM2_WR(bp, afex_driver_support,
2432 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2433 }
2434
2435 /* Set AFEX default VLAN tag to an invalid value */
2436 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002437}
2438
Eric Dumazet1191cb82012-04-27 21:39:21 +00002439/**
2440 * bnx2x_bz_fp - zero content of the fastpath structure.
2441 *
2442 * @bp: driver handle
2443 * @index: fastpath index to be zeroed
2444 *
 2445 * Makes sure the contents of bp->fp[index].napi are kept
2446 * intact.
2447 */
2448static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2449{
2450 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002451 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002452 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002453 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002454
Eric Dumazet1191cb82012-04-27 21:39:21 +00002455 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002456 if (fp->tpa_info)
2457 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2458 sizeof(struct bnx2x_agg_info));
2459 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002460
2461 /* Restore the NAPI object as it has been already initialized */
2462 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002463 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002464 fp->bp = bp;
2465 fp->index = index;
2466 if (IS_ETH_FP(fp))
2467 fp->max_cos = bp->max_cos;
2468 else
2469 /* Special queues support only one CoS */
2470 fp->max_cos = 1;
2471
Merav Sicron65565882012-06-19 07:48:26 +00002472 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002473 if (IS_FCOE_FP(fp))
2474 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002475 if (IS_ETH_FP(fp))
2476 for_each_cos_in_tx_queue(fp, cos)
2477 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2478 BNX2X_NUM_ETH_QUEUES(bp) + index];
2479
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002480	/* Set the TPA mode for each queue. The TPA mode determines the queue's
Eric Dumazet1191cb82012-04-27 21:39:21 +00002481	 * minimal size, so it must be set prior to queue memory allocation.
2482 */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002483 if (bp->dev->features & NETIF_F_LRO)
Eric Dumazet1191cb82012-04-27 21:39:21 +00002484 fp->mode = TPA_MODE_LRO;
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002485 else if (bp->dev->features & NETIF_F_GRO &&
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002486 bnx2x_mtu_allows_gro(bp->dev->mtu))
Eric Dumazet1191cb82012-04-27 21:39:21 +00002487 fp->mode = TPA_MODE_GRO;
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002488 else
2489 fp->mode = TPA_MODE_DISABLED;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002490
Michal Schmidt22a8f232015-04-27 17:20:38 +02002491 /* We don't want TPA if it's disabled in bp
2492 * or if this is an FCoE L2 ring.
2493 */
2494 if (bp->disable_tpa || IS_FCOE_FP(fp))
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002495 fp->mode = TPA_MODE_DISABLED;
Merav Sicron55c11942012-11-07 00:45:48 +00002496}
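
/* The function above uses a common save/zero/restore idiom: copy out the
 * few fields that are initialized only once, wipe the whole structure,
 * then copy them back. A minimal generic sketch with a hypothetical
 * structure, not driver code:
 */
#if 0
struct example_ctx {
	struct napi_struct napi;	/* initialized once, must survive */
	int scratch;			/* may be zeroed freely */
};

static void example_bz(struct example_ctx *ctx)
{
	struct napi_struct saved = ctx->napi;	/* save the invariant */

	memset(ctx, 0, sizeof(*ctx));		/* wipe everything */
	ctx->napi = saved;			/* restore the invariant */
}
#endif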

void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
{
	u32 cur;

	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
		return;

	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
	   cur, state);

	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
}

int bnx2x_load_cnic(struct bnx2x *bp)
{
	int i, rc, port = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");

	mutex_init(&bp->cnic_mutex);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem_cnic(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
		}
	}

	rc = bnx2x_alloc_fp_mem_cnic(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Update the number of queues with the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 1);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Add all CNIC NAPI objects */
	bnx2x_add_all_napi_cnic(bp);
	DP(NETIF_MSG_IFUP, "cnic napi added\n");
	bnx2x_napi_enable_cnic(bp);

	rc = bnx2x_init_hw_func_cnic(bp);
	if (rc)
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);

	bnx2x_nic_init_cnic(bp);

	if (IS_PF(bp)) {
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);

		/* setup cnic queues */
		for_each_cnic_queue(bp, i) {
			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error_cnic2);
			}
		}
	}

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_setup_cnic_info(bp);
	bp->cnic_loaded = true;
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);

	DP(NETIF_MSG_IFUP, "CNIC-related load ended successfully\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error_cnic2:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

load_error_cnic1:
	bnx2x_napi_disable_cnic(bp);
	/* Update the number of queues without the cnic queues */
	if (bnx2x_set_real_num_queues(bp, 0))
		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
	BNX2X_ERR("CNIC-related load failed\n");
	bnx2x_free_fp_mem_cnic(bp);
	bnx2x_free_mem_cnic(bp);
	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
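
/* The error handling above is the usual kernel "goto ladder": each label
 * undoes only what was set up before the failure point, in reverse order,
 * so a failure at any stage releases exactly the acquired resources. A
 * minimal sketch with hypothetical alloc_a()/alloc_b() helpers:
 */
#if 0
static int example_load(void)
{
	int rc;

	rc = alloc_a();
	if (rc)
		goto err0;		/* nothing acquired yet */

	rc = alloc_b();
	if (rc)
		goto err1;		/* only 'a' needs releasing */

	return 0;

err1:
	free_a();
err0:
	return rc;
}
#endif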

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0, load_code = 0;

	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
	DP(NETIF_MSG_IFUP,
	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* zero the structure w/o any lock, before SP handler is initialized */
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);

	if (IS_PF(bp))
		/* must be called before memory allocation and HW init */
		bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->mode and txdata_ptr.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);
	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
				  bp->num_cnic_queues) *
				  sizeof(struct bnx2x_fp_txdata));

	bp->fcoe_init = false;

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory\n");
			return rc;
		}
	}

	/* needs to be done after alloc mem, since it's self-adjusting to the
	 * amount of memory available for the RSS queues
	 */
	rc = bnx2x_alloc_fp_mem(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for fps\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* Allocate memory for FW statistics */
	if (bnx2x_alloc_fw_stats_mem(bp))
		LOAD_ERROR_EXIT(bp, load_error0);

	/* request pf to initialize status blocks */
	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_init(bp);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it. At this stage cnic queues are not counted.
	 */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);
	DP(NETIF_MSG_IFUP, "napi added\n");
	bnx2x_napi_enable(bp);

	if (IS_PF(bp)) {
		/* set pf load just before approaching the MCP */
		bnx2x_set_pf_load(bp);

		/* if mcp exists send load request and analyze response */
		if (!BP_NOMCP(bp)) {
			/* attempt to load pf */
			rc = bnx2x_nic_load_request(bp, &load_code);
			if (rc)
				LOAD_ERROR_EXIT(bp, load_error1);

			/* what did mcp say? */
			rc = bnx2x_compare_fw_ver(bp, load_code, true);
			if (rc) {
				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		} else {
			load_code = bnx2x_nic_load_no_mcp(bp, port);
		}

		/* mark pmf if applicable */
		bnx2x_nic_load_pmf(bp, load_code);

		/* Init Function state controlling object */
		bnx2x__init_func_obj(bp);

		/* Initialize HW */
		rc = bnx2x_init_hw(bp, load_code);
		if (rc) {
			BNX2X_ERR("HW init failed, aborting\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
			LOAD_ERROR_EXIT(bp, load_error2);
		}
	}

	bnx2x_pre_irq_nic_init(bp);

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("setup irqs failed\n");
		if (IS_PF(bp))
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Init per-function objects */
	if (IS_PF(bp)) {
		/* Setup NIC internals and enable interrupts */
		bnx2x_post_irq_nic_init(bp, load_code);

		bnx2x_init_bp_objs(bp);
		bnx2x_iov_nic_init(bp);

		/* Set AFEX default VLAN tag to an invalid value */
		bp->afex_def_vlan_tag = -1;
		bnx2x_nic_load_afex_dcc(bp, load_code);
		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
		rc = bnx2x_func_start(bp);
		if (rc) {
			BNX2X_ERR("Function start failed!\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* Send LOAD_DONE command to MCP */
		if (!BP_NOMCP(bp)) {
			load_code = bnx2x_fw_command(bp,
						     DRV_MSG_CODE_LOAD_DONE, 0);
			if (!load_code) {
				BNX2X_ERR("MCP response failure, aborting\n");
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* initialize FW coalescing state machines in RAM */
		bnx2x_update_coalesce(bp);
	}

	/* setup the leading queue */
	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* set up the rest of the queues */
	for_each_nondefault_eth_queue(bp, i) {
		if (IS_PF(bp))
			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
		else /* VF */
			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
		if (rc) {
			BNX2X_ERR("Queue %d setup failed\n", i);
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	/* setup rss */
	rc = bnx2x_init_rss(bp);
	if (rc) {
		BNX2X_ERR("PF RSS init failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Now that clients are configured, we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	if (IS_PF(bp))
		rc = bnx2x_set_eth_mac(bp, true);
	else /* vf */
		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
					   true);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	if (IS_PF(bp) && bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf) {
		rc = bnx2x_initial_phy_init(bp, load_mode);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error3);
	}
	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;

	/* Start fast path */

	/* Re-configure vlan filters */
	rc = bnx2x_vlan_reconfigure_vid(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error3);

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	if (bp->flags & PTP_SUPPORTED) {
		bnx2x_init_ptp(bp);
		bnx2x_configure_ptp_filters(bp);
	}
	/* Start Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queues should only be re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_atomic();
		break;

	case LOAD_DIAG:
	case LOAD_LOOPBACK_EXT:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	if (CNIC_ENABLED(bp))
		bnx2x_load_cnic(bp);

	if (IS_PF(bp))
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		/* mark driver as loaded in shmem2 */
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		val &= ~DRV_FLAGS_MTU_MASK;
		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
		return -EBUSY;
	}

	/* Update driver data for On-Chip MFW dump. */
	if (IS_PF(bp))
		bnx2x_update_mfw_dump(bp);

	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
		bnx2x_dcbx_init(bp, false);

	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);

	DP(NETIF_MSG_IFUP, "NIC load ended successfully\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error3:
	if (IS_PF(bp)) {
		bnx2x_int_disable_sync(bp, 1);

		/* Clean queueable objects */
		bnx2x_squeeze_objects(bp);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	bnx2x_del_all_napi(bp);

	/* clear pf_load status, as it was already set */
	if (IS_PF(bp))
		bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_fw_stats_mem(bp);
	bnx2x_free_fp_mem(bp);
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
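
/* bnx2x_nic_load() and bnx2x_nic_unload() assume rtnl_lock is held, as the
 * ndo_open()/ndo_stop() paths guarantee. A hypothetical caller outside
 * those paths would have to take the lock itself; minimal sketch:
 */
#if 0
static int example_bring_up(struct bnx2x *bp)
{
	int rc;

	rtnl_lock();
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	rtnl_unlock();

	return rc;
}
#endif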

int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
	u8 rc = 0, cos, i;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
		if (rc)
			return rc;
	}
	return 0;
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
	int i;
	bool global = false;

	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");

	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);

	/* mark driver as unloaded in shmem2 */
	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
	    (bp->state == BNX2X_STATE_CLOSED ||
	     bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete the recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/* Nothing to do during unload if the previous bnx2x_nic_load()
	 * has not completed successfully - all resources are released.
	 *
	 * We can get here only after an unsuccessful ndo_* callback, during
	 * which dev->IFF_UP flag is still on.
	 */
	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
		return 0;

	/* It's important to set bp->state to a value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* indicate to VFs that the PF is going down */
	bnx2x_iov_channel_down(bp);

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	netdev_reset_tc(bp->dev);

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	if (IS_PF(bp)) {
		/* Set ALWAYS_ALIVE bit in shmem */
		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
		bnx2x_drv_pulse(bp);
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_save_statistics(bp);
	}

	/* Wait until consumers catch up with producers in all queues.
	 * If we're recovering, FW can't write to host so there is no reason
	 * to wait for the queues to complete all Tx.
	 */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_drain_tx_queues(bp);

	/* if VF, indicate to the PF that this function is going down (the PF
	 * will delete sp elements and clear initializations)
	 */
	if (IS_VF(bp))
		bnx2x_vfpf_close_vf(bp);
	else if (unload_mode != UNLOAD_RECOVERY)
		/* if this is a normal/close unload need to clean up chip */
		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/* Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine whose leader will perform the recovery last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);
		/* Delete all NAPI objects */
		bnx2x_del_all_napi(bp);
		if (CNIC_LOADED(bp))
			bnx2x_del_all_napi_cnic(bp);
		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, false);
	}

	/*
	 * At this stage no more interrupts will arrive, so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	if (IS_PF(bp))
		bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* clear pending work in rtnl task */
	bp->sp_rtnl_state = 0;
	smp_mb();

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_skbs_cnic(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_fp_mem(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_fp_mem_cnic(bp);

	if (IS_PF(bp)) {
		if (CNIC_LOADED(bp))
			bnx2x_free_mem_cnic(bp);
	}
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;
	bp->cnic_loaded = false;

	/* Clear driver version indication in shmem */
	if (IS_PF(bp))
		bnx2x_update_mng_version(bp);

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (IS_PF(bp) &&
	    !bnx2x_clear_pf_load(bp) &&
	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");

	return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pdev->pm_cap) {
		BNX2X_DEV_INFO("No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above, don't
		 * shut down the power.
		 */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
		return -EINVAL;
	}
	return 0;
}
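
/* The PMCSR accesses above follow the standard PCI power-management
 * register layout: the low bits of PCI_PM_CTRL select the D-state
 * (0 = D0, 3 = D3hot) and PCI_PM_CTRL_PME_ENABLE arms PME for Wake-on-LAN.
 * A minimal sketch of reading back the current D-state (not driver code):
 */
#if 0
static int example_read_dstate(struct pci_dev *pdev)
{
	u16 pmcsr;

	if (!pdev->pm_cap)
		return -ENODEV;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
	return pmcsr & PCI_PM_CTRL_STATE_MASK;	/* 0..3 */
}
#endif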

/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int rx_work_done;
	u8 cos;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		napi_complete(napi);
		return 0;
	}
#endif
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
			bnx2x_tx_int(bp, fp->txdata_ptr[cos]);

	rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;

	if (rx_work_done < budget) {
		/* No need to update SB for FCoE L2 ring as long as
		 * it's connected to the default SB and the SB
		 * has been updated when NAPI was scheduled.
		 */
		if (IS_FCOE_FP(fp)) {
			napi_complete_done(napi, rx_work_done);
		} else {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				if (napi_complete_done(napi, rx_work_done)) {
					/* Re-enable interrupts */
					DP(NETIF_MSG_RX_STATUS,
					   "Update index to %d\n", fp->fp_hc_idx);
					bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
						     le16_to_cpu(fp->fp_hc_idx),
						     IGU_INT_ENABLE, 1);
				}
			} else {
				rx_work_done = budget;
			}
		}
	}

	return rx_work_done;
}
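
/* bnx2x_poll() above follows the standard NAPI contract: consume at most
 * @budget Rx packets, and only complete NAPI and re-enable the device
 * interrupt when less than the budget was used. A stripped-down sketch of
 * that control flow with hypothetical helpers:
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_rx(napi, budget);	/* hypothetical Rx consumer */

	if (done < budget && napi_complete_done(napi, done))
		example_unmask_irq(napi);	/* hypothetical IRQ unmask */

	return done;
}
#endif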

/* We split the first BD into header and data BDs to ease the pain of our
 * fellow microcode engineers; we use one mapping for both BDs.
 */
static u16 bnx2x_tx_split(struct bnx2x *bp,
			  struct bnx2x_fp_txdata *txdata,
			  struct sw_tx_bd *tx_buf,
			  struct eth_tx_start_bd **tx_bd, u16 hlen,
			  u16 bd_prod)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first, fix the first BD */
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);

	/* now get a new data BD
	 * (after the pbd) and fill it
	 */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
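
/* The split above reuses a single DMA mapping for two descriptors: the
 * first BD keeps the first hlen bytes at the original address, while the
 * new data BD covers the remaining old_len - hlen bytes starting at
 * address + hlen. Sketch of just the address/length arithmetic:
 */
#if 0
static void example_split(dma_addr_t addr, u16 len, u16 hlen,
			  dma_addr_t *data_addr, u16 *data_len)
{
	*data_addr = addr + hlen;	/* data BD starts after the headers */
	*data_len = len - hlen;		/* and carries the rest of the bytes */
}
#endif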

#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	__sum16 tsum = (__force __sum16) csum;

	if (fix > 0)
		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
				  csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		tsum = ~csum_fold(csum_add((__force __wsum) csum,
				  csum_partial(t_header, -fix, 0)));

	return bswab16(tsum);
}
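
/* Note on bnx2x_csum_fix(): it converts a one's-complement checksum that
 * was computed over [t_header - fix, end) into one over [t_header, end):
 * for fix > 0 the partial sum of the extra leading bytes is subtracted,
 * for fix < 0 the partial sum of the missing bytes is added, and the
 * folded result is byte-swapped into the BD's expected format.
 */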

static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;
	__u8 prot = 0;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IPV6)) {
		rc = XMIT_CSUM_V6;
		prot = ipv6_hdr(skb)->nexthdr;
	} else {
		rc = XMIT_CSUM_V4;
		prot = ip_hdr(skb)->protocol;
	}

	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
		if (inner_ip_hdr(skb)->version == 6) {
			rc |= XMIT_CSUM_ENC_V6;
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		} else {
			rc |= XMIT_CSUM_ENC_V4;
			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}
	if (prot == IPPROTO_TCP)
		rc |= XMIT_CSUM_TCP;

	if (skb_is_gso(skb)) {
		if (skb_is_gso_v6(skb)) {
			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V6;
		} else {
			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V4;
		}
	}

	return rc;
}
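
/* xmit_type is a bitmask combining checksum and GSO attributes; e.g. a
 * TCP/IPv4 TSO packet yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4.
 * Sketch of how a caller typically tests it (hypothetical helpers):
 */
#if 0
	u32 xmit_type = bnx2x_xmit_type(bp, skb);

	if (xmit_type & XMIT_GSO)	/* any GSO flavour */
		example_setup_lso();	/* hypothetical */
	if (xmit_type & XMIT_CSUM_ENC)	/* tunnel-aware offload needed */
		example_setup_tunnel();	/* hypothetical */
#endif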

/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4

/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
#define BNX2X_NUM_TSO_WIN_SUB_BDS               3

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* check if the packet requires linearization (packet is too fragmented);
 * no need to check fragmentation if page size > 8K (there will be no
 * violation of FW restrictions)
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
	int to_copy = 0, hlen = 0;

	if (xmit_type & XMIT_GSO_ENC)
		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;

	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			if (xmit_type & XMIT_GSO_ENC)
				hlen = (int)(skb_inner_transport_header(skb) -
					     skb->data) +
					     inner_tcp_hdrlen(skb);
			else
				hlen = (int)(skb_transport_header(skb) -
					     skb->data) + tcp_hdrlen(skb);

			/* Amount of data (w/o headers) in the linear part of the SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data in the linear skb part - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			 * check all windows
			 */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in the non-LSO case, a too fragmented packet should
			 * always be linearized
			 */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
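
/* The check above slides a window of wnd_size fragments across the skb
 * and flags the packet for linearization if any window sums to less than
 * one MSS, since firmware would otherwise have to fetch more than
 * MAX_FETCH_BD descriptors for a single segment. The same scan over a
 * plain array of fragment sizes, as a hedged standalone sketch:
 */
#if 0
static bool example_needs_linearize(const u32 *frag_sz, int nfrags,
				    int wnd_size, u32 mss)
{
	u32 sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		sum += frag_sz[i];
		if (i >= wnd_size)
			sum -= frag_sz[i - wnd_size];	/* slide the window */
		if (i >= wnd_size - 1 && sum < mss)
			return true;	/* a window spans less than one MSS */
	}

	return false;
}
#endif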

/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static void bnx2x_set_pbd_gso(struct sk_buff *skb,
			      struct eth_tx_parse_bd_e1x *pbd,
			      u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = bswab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0));
	} else {
		pbd->tcp_pseudo_csum =
			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0));
	}

	pbd->global_data |=
		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}

/**
 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related, when skb has encapsulation
 */
static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
				 u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data;
	}

	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_inner_transport_header(skb) +
		sizeof(struct udphdr) - skb->data;
}

/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related
 */
static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	}
	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
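
/* Note on units in the two helpers above: the parsing BDs carry header
 * offsets in 16-bit words (hence ">> 1" applied to byte offsets) and the
 * TCP header length in 32-bit dwords (hence the "/ 4").
 */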

/* set FW indication according to inner or outer protocols if tunneled */
static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			       struct eth_tx_start_bd *tx_start_bd,
			       u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}

/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			     struct eth_tx_parse_bd_e1x *pbd,
			     u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		cpu_to_le16(hlen |
			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			  skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}

static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
				      struct eth_tx_parse_bd_e2 *pbd_e2,
				      struct eth_tx_parse_2nd_bd *pbd2,
				      u16 *global_data,
				      u32 xmit_type)
{
	u16 hlen_w = 0;
	u8 outerip_off, outerip_len = 0;

	/* from outer IP to transport */
	hlen_w = (skb_inner_transport_header(skb) -
		  skb_network_header(skb)) >> 1;

	/* transport len */
	hlen_w += inner_tcp_hdrlen(skb) >> 1;

	pbd2->fw_ip_hdr_to_payload_w = hlen_w;

	/* outer IP header info */
	if (xmit_type & XMIT_CSUM_V4) {
		struct iphdr *iph = ip_hdr(skb);
		u32 csum = (__force u32)(~iph->check) -
			   (__force u32)iph->tot_len -
			   (__force u32)iph->frag_off;

		outerip_len = iph->ihl << 1;

		pbd2->fw_ip_csum_wo_len_flags_frag =
			bswab16(csum_fold((__force __wsum)csum));
	} else {
		pbd2->fw_ip_hdr_to_payload_w =
			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
		pbd_e2->data.tunnel_data.flags |=
			ETH_TUNNEL_DATA_IPV6_OUTER;
	}

	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);

	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));

	/* inner IP header info */
	if (xmit_type & XMIT_CSUM_ENC_V4) {
		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);

		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_tcpudp_magic(
					inner_ip_hdr(skb)->saddr,
					inner_ip_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	} else {
		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_ipv6_magic(
					&inner_ipv6_hdr(skb)->saddr,
					&inner_ipv6_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	}

	outerip_off = (skb_network_header(skb) - skb->data) >> 1;

	*global_data |=
		outerip_off |
		(outerip_len <<
		 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
		 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);

	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
	}
}

static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
					 u32 xmit_type)
{
	struct ipv6hdr *ipv6;

	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
		return;

	if (xmit_type & XMIT_GSO_ENC_V6)
		ipv6 = inner_ipv6_hdr(skb);
	else /* XMIT_GSO_V6 */
		ipv6 = ipv6_hdr(skb);

	if (ipv6->nexthdr == NEXTHDR_IPV6)
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));

	txdata = &bp->bnx2x_txq[txq_index];

	/* enable this debug print to view the transmission queue being used
	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* enable this debug print to view the transmission details
	DP(NETIF_MSG_TX_QUEUED,
	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     skb_shinfo(skb)->nr_frags +
		     BDS_PER_TX_PKT +
		     NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
		/* Handle special storage cases separately */
		if (txdata->tx_ring_size == 0) {
			struct bnx2x_eth_q_stats *q_stats =
				bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED,
	   "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
	   skb->len);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED,
			   "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED,
		   "SKB mapping failed - silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	   Please read carefully. First we use one BD which we mark as start,
	   then we have a parsing info BD (used for TSO or xsum),
	   and only then we have the rest of the TSO BDs.
	   (don't forget to mark the last one as last,
	   and to unmap only AFTER you write to the BD ...)
	   And above all, all PBD sizes are in words - NOT DWORDS!
	 */

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
		} else if (bp->ptp_tx_skb) {
			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
		} else {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* schedule check for Tx timestamp */
			bp->ptp_tx_skb = skb_get(skb);
			bp->ptp_tx_start = jiffies;
			schedule_work(&bp->ptp_task);
		}
	}

	/* header nbd: indirectly zero other flags! */
	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (skb_vlan_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
			cpu_to_le16(skb_vlan_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
			(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else {
		/* when transmitting in a vf, start bd must hold the ethertype
		 * for fw to enforce it
		 */
		u16 vlan_tci = 0;
#ifndef BNX2X_STOP_ON_ERROR
		if (IS_VF(bp)) {
#endif
			/* Still need to consider inband vlan for enforced */
			if (__vlan_get_tag(skb, &vlan_tci)) {
				tx_start_bd->vlan_or_ethertype =
					cpu_to_le16(ntohs(eth->h_proto));
			} else {
				tx_start_bd->bd_flags.as_bitfield |=
					(X_ETH_INBAND_VLAN <<
					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
				tx_start_bd->vlan_or_ethertype =
					cpu_to_le16(vlan_tci);
			}
#ifndef BNX2X_STOP_ON_ERROR
		} else {
			/* used by FW for packet accounting */
			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
		}
#endif
	}

	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));

		if (xmit_type & XMIT_CSUM_ENC) {
			u16 global_data = 0;

			/* Set PBD in enc checksum offload case */
			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
						      &pbd_e2_parsing_data,
						      xmit_type);

			/* turn on 2nd parsing and get a BD */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;

			memset(pbd2, 0, sizeof(*pbd2));

			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
				(skb_inner_network_header(skb) -
				 skb->data) >> 1;

			if (xmit_type & XMIT_GSO_ENC)
				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
							  &global_data,
							  xmit_type);

			pbd2->global_data = cpu_to_le16(global_data);

			/* add addition parse BD indication to start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_PARSE_NBDS, 1);
			/* set encapsulation flag in start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_TUNNEL_EXIST, 1);

			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;

			nbd++;
		} else if (xmit_type & XMIT_CSUM) {
			/* Set PBD in checksum offload case w/o encapsulation */
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		}

		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
		/* Add the macs to the parsing BD if this is a vf or if
		 * Tx Switching is enabled.
		 */
		if (IS_VF(bp)) {
			/* override GRE parameters in BD */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);

			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
					      &pbd_e2->data.mac_addr.dst_mid,
					      &pbd_e2->data.mac_addr.dst_lo,
					      eth->h_dest);
		} else {
			if (bp->flags & TX_SWITCHING)
				bnx2x_set_fw_mac_addr(
						&pbd_e2->data.mac_addr.dst_hi,
						&pbd_e2->data.mac_addr.dst_mid,
						&pbd_e2->data.mac_addr.dst_lo,
						eth->h_dest);
#ifdef BNX2X_STOP_ON_ERROR
			/* Enforce security is always set in Stop on Error -
			 * source mac should be present in the parsing BD
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);
#endif
		}

		SET_FLAG(pbd_e2_parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
	} else {
		u16 global_data = 0;
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
		pbd_e1x->global_data |= cpu_to_le16(global_data);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED,
	   "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen)) {
			nbd++;
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod);
		}
		if (!CHIP_IS_E1x(bp))
			pbd_e2_parsing_data |=
				(skb_shinfo(skb)->gso_size <<
				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
				 ETH_TX_PARSE_BD_E2_LSO_MSS;
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
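	/* TX_BD_POFF() gives the producer offset within a ring page; if it
	 * is smaller than nbd, this packet most likely wrapped past a page
	 * boundary and consumed the "next page" pointer BD, which then has
	 * to be counted in nbd as well.
	 */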
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, which we care much more about.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2,
		   pbd_e2->data.mac_addr.dst_hi,
		   pbd_e2->data.mac_addr.dst_mid,
		   pbd_e2->data.mac_addr.dst_lo,
		   pbd_e2->data.mac_addr.src_hi,
		   pbd_e2->data.mac_addr.src_mid,
		   pbd_e2->data.mac_addr.src_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}

void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
{
	int mfw_vn = BP_FW_MB_IDX(bp);
	u32 tmp;

	/* If the shmem mapping shouldn't affect the configuration, fall
	 * back to an identity priority mapping.
	 */
	if (!IS_MF_BD(bp)) {
		int i;

		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
			c2s_map[i] = i;
		*c2s_default = 0;

		return;
	}

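	/* Each 32-bit shmem word packs four 8-bit priority-to-cos entries:
	 * the "lower" word carries priorities 0-3 and the "upper" word
	 * priorities 4-7, starting from the least-significant byte after
	 * the endianness fixup.
	 */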
	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	c2s_map[0] = tmp & 0xff;
	c2s_map[1] = (tmp >> 8) & 0xff;
	c2s_map[2] = (tmp >> 16) & 0xff;
	c2s_map[3] = (tmp >> 24) & 0xff;

	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	c2s_map[4] = tmp & 0xff;
	c2s_map[5] = (tmp >> 8) & 0xff;
	c2s_map[6] = (tmp >> 16) & 0xff;
	c2s_map[7] = (tmp >> 24) & 0xff;

	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
}

/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @dev: net device to configure
 * @num_tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
	int cos, prio, count, offset;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. Aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);

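	/* In MF-BD mode the management FW supplies a per-priority remap
	 * table (read from shmem above); resolve each kernel priority
	 * through it before the prio_to_cos lookup so the mapping matches
	 * what the FW applies on the wire.
	 */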
	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		int outer_prio = c2s_map[prio];

		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   outer_prio, bp->prio_to_cos[outer_prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi-class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}

int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
		     struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if (IS_MF_STORAGE_ONLY(bp)) {
		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return rc;
}

static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;
	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;
	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
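	/* On a failed buffer allocation just skip the slot: ring_prod only
	 * advances on success, so the ring stays contiguously filled with
	 * (rx_ring_size - failure_cnt) buffers.
	 */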
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

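	/* The last CQE of each RCQ page is a "next page" pointer that chains
	 * to the following page, wrapping back to the first page at the end.
	 */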
	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough.
	 * In these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}

static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled), so when swapping is enabled we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non-SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc ? rc : 1;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

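/* Note on "return rc ? rc : 1" above: for ndo_set_features a negative
 * return means failure, 0 means the core should commit 'features' to
 * dev->features itself, and a positive value signals that the driver
 * already updated dev->features (as done just before the reload here).
 * Roughly, in __netdev_update_features():
 *
 *	err = dev->netdev_ops->ndo_set_features(dev, features);
 *	if (err < 0)
 *		return 0;		(features left unchanged)
 *	if (!err)
 *		dev->features = features;
 */
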
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shut down gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

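/* The actual reset runs later from the sp_rtnl worker, in process
 * context and under rtnl, once it sees BNX2X_SP_RTNL_TX_TIMEOUT; a
 * minimal sketch of that handler (see bnx2x_sp_rtnl_task() in
 * bnx2x_main.c):
 *
 *	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT,
 *			       &bp->sp_rtnl_state)) {
 *		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
 *		bnx2x_nic_load(bp, LOAD_NORMAL);
 *	}
 *
 * Deferring the work is what lets the netif shut down gracefully
 * instead of being reset from the watchdog context.
 */
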
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

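/* bnx2x_suspend()/bnx2x_resume() are the legacy PCI power-management
 * hooks; they are wired up through the driver's struct pci_driver in
 * bnx2x_main.c, roughly:
 *
 *	static struct pci_driver bnx2x_pci_driver = {
 *		.name     = DRV_MODULE_NAME,
 *		.id_table = bnx2x_pci_tbl,
 *		.probe    = bnx2x_init_one,
 *		.remove   = bnx2x_remove_one,
 *		.suspend  = bnx2x_suspend,
 *		.resume   = bnx2x_resume,
 *	};
 *
 * Both take rtnl_lock() because they reuse the same nic_unload/nic_load
 * paths as ifdown/ifup.
 */
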
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xstorm cxt validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

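/* A hypothetical call site, for illustration only ('cxt' and 'cid' are
 * placeholders): when an L2 queue's eth_context is prepared during
 * queue setup, the raw cid is passed in and the validation bytes are
 * derived from HW_CID()/CDU-region/connection-type, letting the CDU
 * block sanity-check that a context it fetches really belongs to that
 * connection:
 *
 *	bnx2x_set_ctx_validation(bp, cxt, cid);
 */
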
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	/* a zero usec value also disables coalescing on this index */
	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

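/* Worked example, assuming BNX2X_BTR is 4 (i.e. one firmware tick is
 * 4 usec): a request of usec = 50 programs ticks = 50 / 4 = 12, about
 * 48 usec of coalescing delay, with the index left enabled.  A request
 * of usec = 0 yields ticks = 0 and forces disable = 1, so a zero
 * timeout means "no coalescing on this index", not "fire immediately".
 */
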
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
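
/* The barriers above pair with the reader in bnx2x_sp_rtnl_task(); a
 * simplified sketch of the consumer side (bnx2x_main.c):
 *
 *	rtnl_lock();
 *	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT,
 *			       &bp->sp_rtnl_state))
 *		(handle the TX timeout)
 *	rtnl_unlock();
 *
 * set_bit() is an atomic RMW without barrier semantics, hence the
 * explicit smp_mb__before_atomic()/smp_mb__after_atomic() pair to
 * order the flag update against the work it describes.
 */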