/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}
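
/* Illustrative note: with the bnx2x_num_queues module parameter left at 0,
 * the "?:" form above falls back to netif_get_num_default_rss_queues()
 * (assumed here to be min(8, num_online_cpus()) in kernels of this era),
 * and the result is then clamped to [1, BNX2X_MAX_QUEUES(bp)].
 */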

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpying the entire
 * source onto the target. txdata pointers and related
 * content are updated as well.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
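
/* Worked example of the txdata index math above (illustrative): with
 * BNX2X_NUM_ETH_QUEUES(bp) == 8, max_cos == 3 and the FCoE fastpath moved
 * from index 8 down to index 6, the FCoE txdata slot moves from
 * 8 * 3 + FCOE_TXQ_IDX_OFFSET to (8 - 8 + 6) * 3 + FCOE_TXQ_IDX_OFFSET,
 * i.e. backward by max_cos * delta entries, as the comment in the
 * function states.
 */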

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
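
/* Illustrative BD-chain layout walked by bnx2x_free_tx_pkt() above for a TSO
 * packet with a split header:
 *
 *   start BD -> parse BD [-> second parse BD] -> split-header BD -> frag BDs
 *
 * The start BD and the split-header BD share one DMA mapping, which is why
 * the dma_unmap_single() call covers BD_UNMAP_LEN(start) + split_bd_len.
 */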

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
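
/* Illustrative pairing note: the smp_mb() above pairs with a matching barrier
 * the transmit path issues after netif_tx_stop_queue() (see
 * bnx2x_start_xmit()). Each side must publish its ring update before testing
 * the other side's state; otherwise both could conclude there is nothing to
 * do and leave the queue stopped forever.
 */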

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
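
/* Note on the mask arithmetic above (illustrative): fp->sge_mask is a vector
 * of 64-bit elements with one bit per SGE. BIT_VEC64_ELEM_SHIFT (6) turns an
 * SGE index into an element index, and each fully consumed element advances
 * the producer by BIT_VEC64_ELEM_SZ (64) ring entries in one step.
 */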

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximates the MSS value for this aggregation using its first
 * packet, and computes the number of aggregated segments and the
 * gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
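
/* Worked example (illustrative): for an IPv4 aggregation carrying TCP
 * timestamps, hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66, so a 1514-byte
 * len_on_bd yields gso_size = 1448, the usual MSS on a timestamped
 * 1500-MTU connection.
 */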

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {

		/* put page reference used by the memory pool, since we
		 * won't be using this page as the mempool anymore.
		 */
		if (pool->page)
			put_page(pool->page);

		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page)) {
			BNX2X_ERR("Can't alloc sge\n");
			return -ENOMEM;
		}

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	get_page(pool->page);
	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	pool->offset += SGE_PAGE_SIZE;

	return 0;
}
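
/* Illustrative note on the page pool above: one (possibly compound) page is
 * carved into SGE_PAGE_SIZE slices. Every slice handed to the ring takes its
 * own get_page() reference, and the pool drops its reference with put_page()
 * once no further slice fits, so the page is finally freed only after all
 * outstanding slices have been consumed and unmapped.
 */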

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
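
/* Illustrative note: fp->rx_frag_size is expected to be non-zero only when
 * the rx buffer is small enough to be served from a page frag; for large
 * MTUs the driver falls back to plain kmalloc()/kfree() buffers, which is
 * why both helpers above branch on the same field.
 */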

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif
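
/* Why the pseudo-header checksum is rebuilt above (illustrative): the HW has
 * already coalesced the segments, so before tcp_gro_complete() is invoked the
 * TCP checksum field must carry the inverted pseudo-header sum for the new
 * total length, matching what the software GRO completion path would have
 * left there.
 */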

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
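
/* Decision table for the checks above (illustrative):
 *
 *   L4 not validated        -> leave CHECKSUM_NONE (stack verifies in SW)
 *   L4 validated, bad csum  -> count hw_csum_err, leave CHECKSUM_NONE
 *   L4 validated, good csum -> CHECKSUM_UNNECESSARY
 */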

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier the TPA state-machine might
		 * enter an inconsistent state and the kernel stack might be
		 * provided with an incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
						rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1179
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001180/* calculates MF speed according to current linespeed and MF configuration */
1181u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1182{
1183 u16 line_speed = bp->link_vars.line_speed;
1184 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001185 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1186 bp->mf_config[BP_VN(bp)]);
1187
1188 /* Calculate the current MAX line speed limit for the MF
1189 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001190 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001191 if (IS_MF_SI(bp))
1192 line_speed = (line_speed * maxCfg) / 100;
1193 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001194 u16 vn_max_rate = maxCfg * 100;
1195
1196 if (vn_max_rate < line_speed)
1197 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001198 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001199 }
1200
1201 return line_speed;
1202}
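/* Worked example (values hypothetical): with a 10000 Mbps link and
 * maxCfg = 40 extracted from mf_config:
 *
 *	SI mode: line_speed = (10000 * 40) / 100 = 4000 Mbps
 *	SD mode: vn_max_rate = 40 * 100 = 4000; 4000 < 10000, so
 *	         line_speed = 4000 Mbps
 *
 * Both modes cap the reported speed at the per-function maximum; SI
 * encodes the limit as a percentage, SD in units of 100 Mbps.
 */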
1203
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001204/**
1205 * bnx2x_fill_report_data - fill link report data to report
1206 *
1207 * @bp: driver handle
1208 * @data: link state to update
1209 *
 1210 * It uses non-atomic bit operations because it is called under the mutex.
1211 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001212static void bnx2x_fill_report_data(struct bnx2x *bp,
1213 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001214{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001215 memset(data, 0, sizeof(*data));
1216
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001217 if (IS_PF(bp)) {
1218 /* Fill the report data: effective line speed */
1219 data->line_speed = bnx2x_get_mf_speed(bp);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001220
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001221 /* Link is down */
1222 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1223 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1224 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001225
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001226 if (!BNX2X_NUM_ETH_QUEUES(bp))
1227 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1228 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001229
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001230 /* Full DUPLEX */
1231 if (bp->link_vars.duplex == DUPLEX_FULL)
1232 __set_bit(BNX2X_LINK_REPORT_FD,
1233 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001234
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001235 /* Rx Flow Control is ON */
1236 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1237 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1238 &data->link_report_flags);
1239
1240 /* Tx Flow Control is ON */
1241 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1242 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1243 &data->link_report_flags);
1244 } else { /* VF */
1245 *data = bp->vf_link_vars;
1246 }
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001247}
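/* Usage sketch (hypothetical caller, not part of the driver): the bits
 * set above are consumed with the matching test_bit() helpers, e.g.
 *
 *	struct bnx2x_link_report_data data;
 *
 *	bnx2x_fill_report_data(bp, &data);
 *	if (!test_bit(BNX2X_LINK_REPORT_LINK_DOWN, &data.link_report_flags))
 *		netdev_info(bp->dev, "link up at %d Mbps\n", data.line_speed);
 */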
1248
1249/**
1250 * bnx2x_link_report - report link status to OS.
1251 *
1252 * @bp: driver handle
1253 *
1254 * Calls the __bnx2x_link_report() under the same locking scheme
1255 * as a link/PHY state managing code to ensure a consistent link
1256 * reporting.
1257 */
1258
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001259void bnx2x_link_report(struct bnx2x *bp)
1260{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001261 bnx2x_acquire_phy_lock(bp);
1262 __bnx2x_link_report(bp);
1263 bnx2x_release_phy_lock(bp);
1264}
1265
1266/**
1267 * __bnx2x_link_report - report link status to OS.
1268 *
1269 * @bp: driver handle
1270 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001271 * Non-atomic implementation.
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001272 * Should be called under the phy_lock.
1273 */
1274void __bnx2x_link_report(struct bnx2x *bp)
1275{
1276 struct bnx2x_link_report_data cur_data;
1277
1278 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001279 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001280 bnx2x_read_mf_cfg(bp);
1281
1282 /* Read the current link report info */
1283 bnx2x_fill_report_data(bp, &cur_data);
1284
1285 /* Don't report link down or exactly the same link status twice */
1286 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1287 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1288 &bp->last_reported_link.link_report_flags) &&
1289 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 &cur_data.link_report_flags)))
1291 return;
1292
1293 bp->link_cnt++;
1294
 1295 /* We are going to report new link parameters now -
1296 * remember the current data for the next time.
1297 */
1298 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1299
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001300 /* propagate status to VFs */
1301 if (IS_PF(bp))
1302 bnx2x_iov_link_update(bp);
1303
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001304 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1305 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001306 netif_carrier_off(bp->dev);
1307 netdev_err(bp->dev, "NIC Link is Down\n");
1308 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001309 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001310 const char *duplex;
1311 const char *flow;
1312
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001313 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001314
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001315 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1316 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001317 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001318 else
Joe Perches94f05b02011-08-14 12:16:20 +00001319 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001320
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001321 /* Handle the FC at the end so that only these flags could
 1322 * possibly be set. This way we can easily check whether any
 1323 * FC is enabled.
1324 */
1325 if (cur_data.link_report_flags) {
1326 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1327 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001328 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1329 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001330 flow = "ON - receive & transmit";
1331 else
1332 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001333 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001334 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001335 }
Joe Perches94f05b02011-08-14 12:16:20 +00001336 } else {
1337 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001338 }
Joe Perches94f05b02011-08-14 12:16:20 +00001339 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1340 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001341 }
1342}
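/* Suppression sketch: the memcmp()/LINK_DOWN test above collapses
 * duplicate reports. With hypothetical consecutive fill results:
 *
 *	down          -> down           suppressed (both LINK_DOWN)
 *	up 1000/full  -> up 1000/full   suppressed (memcmp() == 0)
 *	up 1000/full  -> up 100/half    reported   (contents differ)
 */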
1343
Eric Dumazet1191cb82012-04-27 21:39:21 +00001344static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1345{
1346 int i;
1347
1348 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1349 struct eth_rx_sge *sge;
1350
1351 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1352 sge->addr_hi =
1353 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1354 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1355
1356 sge->addr_lo =
1357 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1358 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1359 }
1360}
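/* Illustration (assuming, hypothetically, RX_SGE_CNT = 512 and
 * NUM_RX_SGE_PAGES = 2): the loop writes the next-page pointer into the
 * second-to-last descriptor of each page so the SGE pages form a ring:
 *
 *	i = 1: sge index 510  -> rx_sge_mapping + BCM_PAGE_SIZE * 1 (page 1)
 *	i = 2: sge index 1022 -> rx_sge_mapping + BCM_PAGE_SIZE * 0 (page 0)
 *
 * The (i % NUM_RX_SGE_PAGES) term is what wraps the last page back to
 * the first.
 */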
1361
1362static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1363 struct bnx2x_fastpath *fp, int last)
1364{
1365 int i;
1366
1367 for (i = 0; i < last; i++) {
1368 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1369 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1370 u8 *data = first_buf->data;
1371
1372 if (data == NULL) {
1373 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1374 continue;
1375 }
1376 if (tpa_info->tpa_state == BNX2X_TPA_START)
1377 dma_unmap_single(&bp->pdev->dev,
1378 dma_unmap_addr(first_buf, mapping),
1379 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001380 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001381 first_buf->data = NULL;
1382 }
1383}
1384
Merav Sicron55c11942012-11-07 00:45:48 +00001385void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1386{
1387 int j;
1388
1389 for_each_rx_queue_cnic(bp, j) {
1390 struct bnx2x_fastpath *fp = &bp->fp[j];
1391
1392 fp->rx_bd_cons = 0;
1393
1394 /* Activate BD ring */
1395 /* Warning!
1396 * this will generate an interrupt (to the TSTORM)
1397 * must only be done after chip is initialized
1398 */
1399 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1400 fp->rx_sge_prod);
1401 }
1402}
1403
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001404void bnx2x_init_rx_rings(struct bnx2x *bp)
1405{
1406 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001407 u16 ring_prod;
1408 int i, j;
1409
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001410 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001411 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001412 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001413
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001414 DP(NETIF_MSG_IFUP,
1415 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1416
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001417 if (fp->mode != TPA_MODE_DISABLED) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001418 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001419 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001420 struct bnx2x_agg_info *tpa_info =
1421 &fp->tpa_info[i];
1422 struct sw_rx_bd *first_buf =
1423 &tpa_info->first_buf;
1424
Michal Schmidt996dedb2013-09-05 22:13:09 +02001425 first_buf->data =
1426 bnx2x_frag_alloc(fp, GFP_KERNEL);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001427 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001428 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1429 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001430 bnx2x_free_tpa_pool(bp, fp, i);
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001431 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001432 break;
1433 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001434 dma_unmap_addr_set(first_buf, mapping, 0);
1435 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001436 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001437
1438 /* "next page" elements initialization */
1439 bnx2x_set_next_page_sgl(fp);
1440
1441 /* set SGEs bit mask */
1442 bnx2x_init_sge_ring_bit_mask(fp);
1443
1444 /* Allocate SGEs and initialize the ring elements */
1445 for (i = 0, ring_prod = 0;
1446 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1447
Michal Schmidt996dedb2013-09-05 22:13:09 +02001448 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1449 GFP_KERNEL) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001450 BNX2X_ERR("was only able to allocate %d rx sges\n",
1451 i);
1452 BNX2X_ERR("disabling TPA for queue[%d]\n",
1453 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001454 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001455 bnx2x_free_rx_sge_range(bp, fp,
1456 ring_prod);
1457 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001458 MAX_AGG_QS(bp));
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001459 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001460 ring_prod = 0;
1461 break;
1462 }
1463 ring_prod = NEXT_SGE_IDX(ring_prod);
1464 }
1465
1466 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001467 }
1468 }
1469
Merav Sicron55c11942012-11-07 00:45:48 +00001470 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001471 struct bnx2x_fastpath *fp = &bp->fp[j];
1472
1473 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001474
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001475 /* Activate BD ring */
1476 /* Warning!
1477 * this will generate an interrupt (to the TSTORM)
1478 * must only be done after chip is initialized
1479 */
1480 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1481 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001482
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001483 if (j != 0)
1484 continue;
1485
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001486 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001487 REG_WR(bp, BAR_USTRORM_INTMEM +
1488 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1489 U64_LO(fp->rx_comp_mapping));
1490 REG_WR(bp, BAR_USTRORM_INTMEM +
1491 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1492 U64_HI(fp->rx_comp_mapping));
1493 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001494 }
1495}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001496
Merav Sicron55c11942012-11-07 00:45:48 +00001497static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1498{
1499 u8 cos;
1500 struct bnx2x *bp = fp->bp;
1501
1502 for_each_cos_in_tx_queue(fp, cos) {
1503 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1504 unsigned pkts_compl = 0, bytes_compl = 0;
1505
1506 u16 sw_prod = txdata->tx_pkt_prod;
1507 u16 sw_cons = txdata->tx_pkt_cons;
1508
1509 while (sw_cons != sw_prod) {
1510 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1511 &pkts_compl, &bytes_compl);
1512 sw_cons++;
1513 }
1514
1515 netdev_tx_reset_queue(
1516 netdev_get_tx_queue(bp->dev,
1517 txdata->txq_index));
1518 }
1519}
1520
1521static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1522{
1523 int i;
1524
1525 for_each_tx_queue_cnic(bp, i) {
1526 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1527 }
1528}
1529
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001530static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1531{
1532 int i;
1533
Merav Sicron55c11942012-11-07 00:45:48 +00001534 for_each_eth_queue(bp, i) {
1535 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001536 }
1537}
1538
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001539static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1540{
1541 struct bnx2x *bp = fp->bp;
1542 int i;
1543
1544 /* ring wasn't allocated */
1545 if (fp->rx_buf_ring == NULL)
1546 return;
1547
1548 for (i = 0; i < NUM_RX_BD; i++) {
1549 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001550 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001551
Eric Dumazete52fcb22011-11-14 06:05:34 +00001552 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001553 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001554 dma_unmap_single(&bp->pdev->dev,
1555 dma_unmap_addr(rx_buf, mapping),
1556 fp->rx_buf_size, DMA_FROM_DEVICE);
1557
Eric Dumazete52fcb22011-11-14 06:05:34 +00001558 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001559 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001560 }
1561}
1562
Merav Sicron55c11942012-11-07 00:45:48 +00001563static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1564{
1565 int j;
1566
1567 for_each_rx_queue_cnic(bp, j) {
1568 bnx2x_free_rx_bds(&bp->fp[j]);
1569 }
1570}
1571
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001572static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1573{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001574 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001575
Merav Sicron55c11942012-11-07 00:45:48 +00001576 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001577 struct bnx2x_fastpath *fp = &bp->fp[j];
1578
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001579 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001580
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001581 if (fp->mode != TPA_MODE_DISABLED)
David S. Miller8decf862011-09-22 03:23:13 -04001582 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001583 }
1584}
1585
stephen hemmingera8f47eb2014-01-09 22:20:11 -08001586static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00001587{
1588 bnx2x_free_tx_skbs_cnic(bp);
1589 bnx2x_free_rx_skbs_cnic(bp);
1590}
1591
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001592void bnx2x_free_skbs(struct bnx2x *bp)
1593{
1594 bnx2x_free_tx_skbs(bp);
1595 bnx2x_free_rx_skbs(bp);
1596}
1597
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001598void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1599{
1600 /* load old values */
1601 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1602
1603 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1604 /* leave all but MAX value */
1605 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1606
1607 /* set new MAX value */
1608 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1609 & FUNC_MF_CFG_MAX_BW_MASK;
1610
1611 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1612 }
1613}
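/* Example (mask/shift values hypothetical): a read-modify-write on the
 * MAX_BW bitfield only. With FUNC_MF_CFG_MAX_BW_MASK = 0xff00,
 * FUNC_MF_CFG_MAX_BW_SHIFT = 8, mf_cfg = 0x2864 and value = 50:
 *
 *	mf_cfg &= ~0xff00;               (mf_cfg == 0x0064)
 *	mf_cfg |= (50 << 8) & 0xff00;    (mf_cfg == 0x3264)
 *
 * All other configuration bits in mf_cfg are preserved.
 */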
1614
Dmitry Kravkovca924292011-06-14 01:33:08 +00001615/**
1616 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1617 *
1618 * @bp: driver handle
1619 * @nvecs: number of vectors to be released
1620 */
1621static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001622{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001623 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001624
Dmitry Kravkovca924292011-06-14 01:33:08 +00001625 if (nvecs == offset)
1626 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001627
1628 /* VFs don't have a default SB */
1629 if (IS_PF(bp)) {
1630 free_irq(bp->msix_table[offset].vector, bp->dev);
1631 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1632 bp->msix_table[offset].vector);
1633 offset++;
1634 }
Merav Sicron55c11942012-11-07 00:45:48 +00001635
1636 if (CNIC_SUPPORT(bp)) {
1637 if (nvecs == offset)
1638 return;
1639 offset++;
1640 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001641
Dmitry Kravkovca924292011-06-14 01:33:08 +00001642 for_each_eth_queue(bp, i) {
1643 if (nvecs == offset)
1644 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001645 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1646 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001647
1648 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001649 }
1650}
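/* MSI-X vector layout assumed by the offset walk above (PF with CNIC
 * support, illustrative):
 *
 *	msix_table[0]          slowpath (default status block)
 *	msix_table[1]          CNIC
 *	msix_table[2..n+1]     ETH fastpath queues 0..n-1
 *
 * VFs have no default status block, so their table starts directly at
 * the CNIC/fastpath entries.
 */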
1651
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001652void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001653{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001654 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001655 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1656 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1657
1658 /* vfs don't have a default status block */
1659 if (IS_PF(bp))
1660 nvecs++;
1661
1662 bnx2x_free_msix_irqs(bp, nvecs);
1663 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001664 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001665 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001666}
1667
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001668int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001669{
Ariel Elior1ab44342013-01-01 05:22:23 +00001670 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001671
Ariel Elior1ab44342013-01-01 05:22:23 +00001672 /* VFs don't have a default status block */
1673 if (IS_PF(bp)) {
1674 bp->msix_table[msix_vec].entry = msix_vec;
1675 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1676 bp->msix_table[0].entry);
1677 msix_vec++;
1678 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001679
Merav Sicron55c11942012-11-07 00:45:48 +00001680 /* Cnic requires an msix vector for itself */
1681 if (CNIC_SUPPORT(bp)) {
1682 bp->msix_table[msix_vec].entry = msix_vec;
1683 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1684 msix_vec, bp->msix_table[msix_vec].entry);
1685 msix_vec++;
1686 }
1687
Ariel Elior6383c0b2011-07-14 08:31:57 +00001688 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001689 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001690 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001691 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1692 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001693 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001694 }
1695
Ariel Elior1ab44342013-01-01 05:22:23 +00001696 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1697 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001698
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001699 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1700 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001701 /*
1702 * reconfigure number of tx/rx queues according to available
1703 * MSI-X vectors
1704 */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001705 if (rc == -ENOSPC) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001706 /* Get by with single vector */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001707 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1708 if (rc < 0) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001709 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1710 rc);
1711 goto no_msix;
1712 }
1713
1714 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1715 bp->flags |= USING_SINGLE_MSIX_FLAG;
1716
Merav Sicron55c11942012-11-07 00:45:48 +00001717 BNX2X_DEV_INFO("set number of queues to 1\n");
1718 bp->num_ethernet_queues = 1;
1719 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001720 } else if (rc < 0) {
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001721 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001722 goto no_msix;
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001723 } else if (rc < msix_vec) {
 1724 /* how many fewer vectors do we have? */
1725 int diff = msix_vec - rc;
1726
1727 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1728
1729 /*
1730 * decrease number of queues by number of unallocated entries
1731 */
1732 bp->num_ethernet_queues -= diff;
1733 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1734
1735 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1736 bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001737 }
1738
1739 bp->flags |= USING_MSIX_FLAG;
1740
1741 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001742
1743no_msix:
 1744 /* fall back to INTx if not enough memory */
1745 if (rc == -ENOMEM)
1746 bp->flags |= DISABLE_MSI_FLAG;
1747
1748 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001749}
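/* Minimal sketch of the pci_enable_msix_range() contract relied on
 * above (hypothetical standalone driver, error handling elided):
 *
 *	struct msix_entry table[4] = {
 *		{ .entry = 0 }, { .entry = 1 },
 *		{ .entry = 2 }, { .entry = 3 },
 *	};
 *	int got = pci_enable_msix_range(pdev, table, 2, 4);
 *
 * A return of 2..4 is a (possibly partial) grant and the caller sizes
 * its queues to 'got'; -ENOSPC means fewer than the minimum of 2 were
 * available, which is why the code above retries with min = max = 1.
 */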
1750
1751static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1752{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001753 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001754
Ariel Eliorad5afc82013-01-01 05:22:26 +00001755 /* no default status block for vf */
1756 if (IS_PF(bp)) {
1757 rc = request_irq(bp->msix_table[offset++].vector,
1758 bnx2x_msix_sp_int, 0,
1759 bp->dev->name, bp->dev);
1760 if (rc) {
1761 BNX2X_ERR("request sp irq failed\n");
1762 return -EBUSY;
1763 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001764 }
1765
Merav Sicron55c11942012-11-07 00:45:48 +00001766 if (CNIC_SUPPORT(bp))
1767 offset++;
1768
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001769 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001770 struct bnx2x_fastpath *fp = &bp->fp[i];
1771 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1772 bp->dev->name, i);
1773
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001774 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001775 bnx2x_msix_fp_int, 0, fp->name, fp);
1776 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001777 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1778 bp->msix_table[offset].vector, rc);
1779 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001780 return -EBUSY;
1781 }
1782
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001783 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001784 }
1785
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001786 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001787 if (IS_PF(bp)) {
1788 offset = 1 + CNIC_SUPPORT(bp);
1789 netdev_info(bp->dev,
1790 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1791 bp->msix_table[0].vector,
1792 0, bp->msix_table[offset].vector,
1793 i - 1, bp->msix_table[offset + i - 1].vector);
1794 } else {
1795 offset = CNIC_SUPPORT(bp);
1796 netdev_info(bp->dev,
1797 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1798 0, bp->msix_table[offset].vector,
1799 i - 1, bp->msix_table[offset + i - 1].vector);
1800 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001801 return 0;
1802}
1803
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001804int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001805{
1806 int rc;
1807
1808 rc = pci_enable_msi(bp->pdev);
1809 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001810 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001811 return -1;
1812 }
1813 bp->flags |= USING_MSI_FLAG;
1814
1815 return 0;
1816}
1817
1818static int bnx2x_req_irq(struct bnx2x *bp)
1819{
1820 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001821 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001822
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001823 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001824 flags = 0;
1825 else
1826 flags = IRQF_SHARED;
1827
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001828 if (bp->flags & USING_MSIX_FLAG)
1829 irq = bp->msix_table[0].vector;
1830 else
1831 irq = bp->pdev->irq;
1832
1833 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001834}
1835
Yuval Mintzc957d092013-06-25 08:50:11 +03001836static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001837{
1838 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001839 if (bp->flags & USING_MSIX_FLAG &&
1840 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001841 rc = bnx2x_req_msix_irqs(bp);
1842 if (rc)
1843 return rc;
1844 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001845 rc = bnx2x_req_irq(bp);
1846 if (rc) {
1847 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1848 return rc;
1849 }
1850 if (bp->flags & USING_MSI_FLAG) {
1851 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001852 netdev_info(bp->dev, "using MSI IRQ %d\n",
1853 bp->dev->irq);
1854 }
1855 if (bp->flags & USING_MSIX_FLAG) {
1856 bp->dev->irq = bp->msix_table[0].vector;
1857 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1858 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001859 }
1860 }
1861
1862 return 0;
1863}
1864
Merav Sicron55c11942012-11-07 00:45:48 +00001865static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1866{
1867 int i;
1868
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001869 for_each_rx_queue_cnic(bp, i) {
Eric Dumazet074975d2015-04-14 18:45:00 -07001870 bnx2x_fp_busy_poll_init(&bp->fp[i]);
Merav Sicron55c11942012-11-07 00:45:48 +00001871 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001872 }
Merav Sicron55c11942012-11-07 00:45:48 +00001873}
1874
Eric Dumazet1191cb82012-04-27 21:39:21 +00001875static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001876{
1877 int i;
1878
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001879 for_each_eth_queue(bp, i) {
Eric Dumazet074975d2015-04-14 18:45:00 -07001880 bnx2x_fp_busy_poll_init(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001881 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001882 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001883}
1884
Merav Sicron55c11942012-11-07 00:45:48 +00001885static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1886{
1887 int i;
1888
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001889 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001890 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001891 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1892 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001893 }
Merav Sicron55c11942012-11-07 00:45:48 +00001894}
1895
Eric Dumazet1191cb82012-04-27 21:39:21 +00001896static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001897{
1898 int i;
1899
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001900 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001901 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001902 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1903 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001904 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001905}
1906
1907void bnx2x_netif_start(struct bnx2x *bp)
1908{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001909 if (netif_running(bp->dev)) {
1910 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001911 if (CNIC_LOADED(bp))
1912 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001913 bnx2x_int_enable(bp);
1914 if (bp->state == BNX2X_STATE_OPEN)
1915 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001916 }
1917}
1918
1919void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1920{
1921 bnx2x_int_disable_sync(bp, disable_hw);
1922 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001923 if (CNIC_LOADED(bp))
1924 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001925}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001926
Jason Wangf663dd92014-01-10 16:18:26 +08001927u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001928 void *accel_priv, select_queue_fallback_t fallback)
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001929{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001930 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001931
Merav Sicron55c11942012-11-07 00:45:48 +00001932 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001933 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1934 u16 ether_type = ntohs(hdr->h_proto);
1935
1936 /* Skip VLAN tag if present */
1937 if (ether_type == ETH_P_8021Q) {
1938 struct vlan_ethhdr *vhdr =
1939 (struct vlan_ethhdr *)skb->data;
1940
1941 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1942 }
1943
1944 /* If ethertype is FCoE or FIP - use FCoE ring */
1945 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001946 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001947 }
Merav Sicron55c11942012-11-07 00:45:48 +00001948
David S. Miller823dcd22011-08-20 10:39:12 -07001949 /* select a non-FCoE queue */
Daniel Borkmann99932d42014-02-16 15:55:20 +01001950 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001951}
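/* Frame-walk example (illustrative): for an FCoE frame inside a VLAN
 * the bytes at skb->data are
 *
 *	dst[6] src[6] 0x8100 tci[2] 0x8906 payload...
 *
 * The first ntohs() sees ETH_P_8021Q (0x8100), the vlan_ethhdr cast
 * re-reads the encapsulated type ETH_P_FCOE (0x8906), and the skb is
 * steered to the FCoE ring instead of the RSS hash fallback.
 */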
1952
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001953void bnx2x_set_num_queues(struct bnx2x *bp)
1954{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001955 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001956 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001957
Barak Witkowskia3348722012-04-23 03:04:46 +00001958 /* override in STORAGE SD modes */
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03001959 if (IS_MF_STORAGE_ONLY(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001960 bp->num_ethernet_queues = 1;
1961
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001962 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001963 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1964 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001965
1966 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001967}
1968
David S. Miller823dcd22011-08-20 10:39:12 -07001969/**
1970 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1971 *
1972 * @bp: Driver handle
1973 *
 1974 * We currently support at most 16 Tx queues for each CoS, thus we will
1975 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1976 * bp->max_cos.
1977 *
1978 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1979 * index after all ETH L2 indices.
1980 *
1981 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1982 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001983 * 16..31,...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001984 *
1985 * The proper configuration of skb->queue_mapping is handled by
1986 * bnx2x_select_queue() and __skb_tx_hash().
1987 *
1988 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1989 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1990 */
Merav Sicron55c11942012-11-07 00:45:48 +00001991static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001992{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001993 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001994
Merav Sicron65565882012-06-19 07:48:26 +00001995 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001996 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001997
1998/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001999 if (include_cnic && !NO_FCOE(bp)) {
2000 rx++;
2001 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002002 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002003
2004 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2005 if (rc) {
2006 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2007 return rc;
2008 }
2009 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2010 if (rc) {
2011 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2012 return rc;
2013 }
2014
Merav Sicron51c1a582012-03-18 10:33:38 +00002015 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002016 tx, rx);
2017
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002018 return rc;
2019}
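/* Worked example (queue counts hypothetical): with 4 ETH queues,
 * bp->max_cos = 3 and an FCoE ring included:
 *
 *	tx = 4 * 3 + 1 = 13 netdev Tx queues
 *	rx = 4 + 1     =  5 netdev Rx queues
 *
 * The extra queue in each direction is the FCoE L2 ring appended after
 * all ETH indices.
 */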
2020
Eric Dumazet1191cb82012-04-27 21:39:21 +00002021static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002022{
2023 int i;
2024
2025 for_each_queue(bp, i) {
2026 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00002027 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002028
2029 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2030 if (IS_FCOE_IDX(i))
2031 /*
2032 * Although there are no IP frames expected to arrive to
2033 * this ring we still want to add an
2034 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2035 * overrun attack.
2036 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00002037 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002038 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00002039 mtu = bp->dev->mtu;
2040 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2041 IP_HEADER_ALIGNMENT_PADDING +
2042 ETH_OVREHEAD +
2043 mtu +
2044 BNX2X_FW_RX_ALIGN_END;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002045 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00002046 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2047 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2048 else
2049 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002050 }
2051}
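/* Size sketch (constants symbolic, since their values are chip and arch
 * dependent): for a regular ring with mtu = 1500,
 *
 *	rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING
 *	              + ETH_OVREHEAD + 1500 + BNX2X_FW_RX_ALIGN_END
 *
 * The page-frag allocator (rx_frag_size != 0) is used only when this
 * total plus NET_SKB_PAD still fits within a single page.
 */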
2052
Ariel Elior60cad4e2013-09-04 14:09:22 +03002053static int bnx2x_init_rss(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002054{
2055 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002056 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2057
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002058 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002059 * enabled
2060 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002061 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2062 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00002063 bp->fp->cl_id +
2064 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002065
2066 /*
2067 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2068 * per-port, so if explicit configuration is needed, do it only
2069 * for a PMF.
2070 *
2071 * For 57712 and newer on the other hand it's a per-function
2072 * configuration.
2073 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002074 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002075}
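/* Example spread (hypothetical IDs): with 4 ETH queues and
 * bp->fp->cl_id = 16, ethtool_rxfh_indir_default(i, 4) is i % 4, so
 *
 *	ind_table[] = { 16, 17, 18, 19, 16, 17, 18, 19, ... }
 *
 * distributing RX flows round-robin over the ETH queues' client IDs.
 */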
2076
Ariel Elior60cad4e2013-09-04 14:09:22 +03002077int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2078 bool config_hash, bool enable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002079{
Yuval Mintz3b603062012-03-18 10:33:39 +00002080 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002081
2082 /* Although RSS is meaningless when there is a single HW queue we
2083 * still need it enabled in order to have HW Rx hash generated.
2084 *
2085 * if (!is_eth_multi(bp))
2086 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2087 */
2088
Dmitry Kravkov96305232012-04-03 18:41:30 +00002089 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002090
2091 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2092
Ariel Elior60cad4e2013-09-04 14:09:22 +03002093 if (enable) {
2094 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002095
Ariel Elior60cad4e2013-09-04 14:09:22 +03002096 /* RSS configuration */
2097 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2098 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2099 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2100 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2101 if (rss_obj->udp_rss_v4)
2102 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2103 if (rss_obj->udp_rss_v6)
2104 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Dmitry Kravkove42780b2014-08-17 16:47:43 +03002105
2106 if (!CHIP_IS_E1x(bp))
2107 /* valid only for TUNN_MODE_GRE tunnel mode */
2108 __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
Ariel Elior60cad4e2013-09-04 14:09:22 +03002109 } else {
2110 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2111 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002112
Dmitry Kravkov96305232012-04-03 18:41:30 +00002113 /* Hash bits */
2114 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002115
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002116 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002117
Dmitry Kravkov96305232012-04-03 18:41:30 +00002118 if (config_hash) {
2119 /* RSS keys */
Eric Dumazete3ec69c2014-11-16 06:23:07 -08002120 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
Dmitry Kravkov96305232012-04-03 18:41:30 +00002121 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002122 }
2123
Ariel Elior60cad4e2013-09-04 14:09:22 +03002124 if (IS_PF(bp))
2125 return bnx2x_config_rss(bp, &params);
2126 else
2127 return bnx2x_vfpf_config_rss(bp, &params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002128}
2129
Eric Dumazet1191cb82012-04-27 21:39:21 +00002130static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002131{
Yuval Mintz3b603062012-03-18 10:33:39 +00002132 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002133
2134 /* Prepare parameters for function state transitions */
2135 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2136
2137 func_params.f_obj = &bp->func_obj;
2138 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2139
2140 func_params.params.hw_init.load_phase = load_code;
2141
2142 return bnx2x_func_state_change(bp, &func_params);
2143}
2144
2145/*
 2146 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002147 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002148 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002149void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002150{
2151 int rc;
2152 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002153 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002154 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002155
2156 /***************** Cleanup MACs' object first *************************/
2157
 2158 /* Wait for completion of the requested operations */
2159 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2160 /* Perform a dry cleanup */
2161 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2162
2163 /* Clean ETH primary MAC */
2164 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002165 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002166 &ramrod_flags);
2167 if (rc != 0)
2168 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2169
2170 /* Cleanup UC list */
2171 vlan_mac_flags = 0;
2172 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2173 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2174 &ramrod_flags);
2175 if (rc != 0)
2176 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2177
2178 /***************** Now clean mcast object *****************************/
2179 rparam.mcast_obj = &bp->mcast_obj;
2180 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2181
Yuval Mintz8b09be52013-08-01 17:30:59 +03002182 /* Add a DEL command... - Since we're doing a driver cleanup only,
2183 * we take a lock surrounding both the initial send and the CONTs,
2184 * as we don't want a true completion to disrupt us in the middle.
2185 */
2186 netif_addr_lock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002187 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2188 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002189 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2190 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002191
2192 /* ...and wait until all pending commands are cleared */
2193 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2194 while (rc != 0) {
2195 if (rc < 0) {
2196 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2197 rc);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002198 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002199 return;
2200 }
2201
2202 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2203 }
Yuval Mintz8b09be52013-08-01 17:30:59 +03002204 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002205}
2206
2207#ifndef BNX2X_STOP_ON_ERROR
2208#define LOAD_ERROR_EXIT(bp, label) \
2209 do { \
2210 (bp)->state = BNX2X_STATE_ERROR; \
2211 goto label; \
2212 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002213
2214#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2215 do { \
2216 bp->cnic_loaded = false; \
2217 goto label; \
2218 } while (0)
2219#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002220#define LOAD_ERROR_EXIT(bp, label) \
2221 do { \
2222 (bp)->state = BNX2X_STATE_ERROR; \
2223 (bp)->panic = 1; \
2224 return -EBUSY; \
2225 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002226#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2227 do { \
2228 bp->cnic_loaded = false; \
2229 (bp)->panic = 1; \
2230 return -EBUSY; \
2231 } while (0)
2232#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002233
Ariel Eliorad5afc82013-01-01 05:22:26 +00002234static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002235{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002236 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2237 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2238 return;
2239}
Yuval Mintz452427b2012-03-26 20:47:07 +00002240
Ariel Eliorad5afc82013-01-01 05:22:26 +00002241static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2242{
Ariel Elior8db573b2013-01-01 05:22:37 +00002243 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002244 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002245
Ariel Eliorad5afc82013-01-01 05:22:26 +00002246 /* number of queues for statistics is number of eth queues + FCoE */
2247 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002248
Ariel Eliorad5afc82013-01-01 05:22:26 +00002249 /* Total number of FW statistics requests =
 2250 * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2251 * and fcoe l2 queue) stats + num of queues (which includes another 1
2252 * for fcoe l2 queue if applicable)
2253 */
2254 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2255
Ariel Elior8db573b2013-01-01 05:22:37 +00002256 /* vf stats appear in the request list, but their data is allocated by
2257 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2258 * it is used to determine where to place the vf stats queries in the
2259 * request struct
2260 */
2261 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002262 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002263
Ariel Eliorad5afc82013-01-01 05:22:26 +00002264 /* Request is built from stats_query_header and an array of
2265 * stats_query_cmd_group each of which contains
 2266 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2267 * configured in the stats_query_header.
2268 */
2269 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002270 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2271 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002272 1 : 0));
2273
Ariel Elior8db573b2013-01-01 05:22:37 +00002274 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2275 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002276 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2277 num_groups * sizeof(struct stats_query_cmd_group);
2278
2279 /* Data for statistics requests + stats_counter
2280 * stats_counter holds per-STORM counters that are incremented
2281 * when STORM has finished with the current request.
 2282 * memory for FCoE offloaded statistics is counted anyway,
2283 * even if they will not be sent.
2284 * VF stats are not accounted for here as the data of VF stats is stored
2285 * in memory allocated by the VF, not here.
2286 */
2287 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2288 sizeof(struct per_pf_stats) +
2289 sizeof(struct fcoe_statistics_params) +
2290 sizeof(struct per_queue_stats) * num_queue_stats +
2291 sizeof(struct stats_counter);
2292
Joe Perchescd2b0382014-02-20 13:25:51 -08002293 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2294 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2295 if (!bp->fw_stats)
2296 goto alloc_mem_err;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002297
2298 /* Set shortcuts */
2299 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2300 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2301 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2302 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2303 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2304 bp->fw_stats_req_sz;
2305
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002306 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002307 U64_HI(bp->fw_stats_req_mapping),
2308 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002309 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002310 U64_HI(bp->fw_stats_data_mapping),
2311 U64_LO(bp->fw_stats_data_mapping));
2312 return 0;
2313
2314alloc_mem_err:
2315 bnx2x_free_fw_stats_mem(bp);
2316 BNX2X_ERR("Can't allocate FW stats memory\n");
2317 return -ENOMEM;
2318}
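/* Worked example of the group arithmetic above (counts hypothetical):
 * with fw_stats_num = 10, vf_headroom = 8 and STATS_QUERY_CMD_COUNT = 16,
 *
 *	num_groups = (18 / 16) + ((18 % 16) ? 1 : 0) = 1 + 1 = 2
 *
 * i.e. a plain ceiling division: 18 query rules need two command groups
 * of 16 slots each.
 */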
2319
2320/* send load request to mcp and analyze response */
2321static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2322{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002323 u32 param;
2324
Ariel Eliorad5afc82013-01-01 05:22:26 +00002325 /* init fw_seq */
2326 bp->fw_seq =
2327 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2328 DRV_MSG_SEQ_NUMBER_MASK);
2329 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2330
2331 /* Get current FW pulse sequence */
2332 bp->fw_drv_pulse_wr_seq =
2333 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2334 DRV_PULSE_SEQ_MASK);
2335 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2336
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002337 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2338
2339 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2340 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2341
Ariel Eliorad5afc82013-01-01 05:22:26 +00002342 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002343 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002344
2345 /* if mcp fails to respond we must abort */
2346 if (!(*load_code)) {
2347 BNX2X_ERR("MCP response failure, aborting\n");
2348 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002349 }
2350
Ariel Eliorad5afc82013-01-01 05:22:26 +00002351 /* If mcp refused (e.g. other port is in diagnostic mode) we
2352 * must abort
2353 */
2354 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2355 BNX2X_ERR("MCP refused load request, aborting\n");
2356 return -EBUSY;
2357 }
2358 return 0;
2359}
2360
2361/* check whether another PF has already loaded FW to chip. In
2362 * virtualized environments a pf from another VM may have already
2363 * initialized the device including loading FW
2364 */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002365int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002366{
2367 /* is another pf loaded on this engine? */
2368 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2369 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2370 /* build my FW version dword */
2371 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2372 (BCM_5710_FW_MINOR_VERSION << 8) +
2373 (BCM_5710_FW_REVISION_VERSION << 16) +
2374 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2375
2376 /* read loaded FW from chip */
2377 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2378
2379 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2380 loaded_fw, my_fw);
2381
2382 /* abort nic load if version mismatch */
2383 if (my_fw != loaded_fw) {
Yuval Mintz91ebb922013-12-26 09:57:07 +02002384 if (print_err)
2385 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2386 loaded_fw, my_fw);
2387 else
2388 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2389 loaded_fw, my_fw);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002390 return -EBUSY;
2391 }
2392 }
2393 return 0;
2394}
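/* Packing example (version numbers hypothetical): for FW 7.8.19.0 the
 * dword compared above is
 *
 *	my_fw = 7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807
 *
 * so the load is refused unless all four version components of the
 * already-loaded firmware match exactly.
 */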
2395
2396/* returns the "mcp load_code" according to global load_count array */
2397static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2398{
2399 int path = BP_PATH(bp);
2400
2401 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002402 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2403 bnx2x_load_count[path][2]);
2404 bnx2x_load_count[path][0]++;
2405 bnx2x_load_count[path][1 + port]++;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002406 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002407 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2408 bnx2x_load_count[path][2]);
2409 if (bnx2x_load_count[path][0] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002410 return FW_MSG_CODE_DRV_LOAD_COMMON;
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002411 else if (bnx2x_load_count[path][1 + port] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002412 return FW_MSG_CODE_DRV_LOAD_PORT;
2413 else
2414 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2415}
2416
2417/* mark PMF if applicable */
2418static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2419{
2420 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2421 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2422 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2423 bp->port.pmf = 1;
2424 /* We need the barrier to ensure the ordering between the
2425 * writing to bp->port.pmf here and reading it from the
2426 * bnx2x_periodic_task().
2427 */
2428 smp_mb();
2429 } else {
2430 bp->port.pmf = 0;
2431 }
2432
2433 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2434}
2435
2436static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2437{
2438 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2439 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2440 (bp->common.shmem2_base)) {
2441 if (SHMEM2_HAS(bp, dcc_support))
2442 SHMEM2_WR(bp, dcc_support,
2443 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2444 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2445 if (SHMEM2_HAS(bp, afex_driver_support))
2446 SHMEM2_WR(bp, afex_driver_support,
2447 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2448 }
2449
2450 /* Set AFEX default VLAN tag to an invalid value */
2451 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002452}
2453
Eric Dumazet1191cb82012-04-27 21:39:21 +00002454/**
2455 * bnx2x_bz_fp - zero content of the fastpath structure.
2456 *
2457 * @bp: driver handle
2458 * @index: fastpath index to be zeroed
2459 *
2460 * Makes sure the contents of the bp->fp[index].napi is kept
2461 * intact.
2462 */
2463static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2464{
2465 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002466 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002467 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002468 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002469
Eric Dumazet1191cb82012-04-27 21:39:21 +00002470 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002471 if (fp->tpa_info)
2472 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2473 sizeof(struct bnx2x_agg_info));
2474 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002475
2476 /* Restore the NAPI object as it has been already initialized */
2477 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002478 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002479 fp->bp = bp;
2480 fp->index = index;
2481 if (IS_ETH_FP(fp))
2482 fp->max_cos = bp->max_cos;
2483 else
2484 /* Special queues support only one CoS */
2485 fp->max_cos = 1;
2486
Merav Sicron65565882012-06-19 07:48:26 +00002487 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002488 if (IS_FCOE_FP(fp))
2489 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002490 if (IS_ETH_FP(fp))
2491 for_each_cos_in_tx_queue(fp, cos)
2492 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2493 BNX2X_NUM_ETH_QUEUES(bp) + index];
2494
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002495 /* set the tpa flag for each queue. The tpa flag determines the minimal
Eric Dumazet1191cb82012-04-27 21:39:21 +00002496 * queue size, so it must be set prior to queue memory allocation
2497 */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002498 if (bp->dev->features & NETIF_F_LRO)
Eric Dumazet1191cb82012-04-27 21:39:21 +00002499 fp->mode = TPA_MODE_LRO;
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002500 else if (bp->dev->features & NETIF_F_GRO &&
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002501 bnx2x_mtu_allows_gro(bp->dev->mtu))
Eric Dumazet1191cb82012-04-27 21:39:21 +00002502 fp->mode = TPA_MODE_GRO;
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002503 else
2504 fp->mode = TPA_MODE_DISABLED;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002505
Michal Schmidt22a8f232015-04-27 17:20:38 +02002506 /* We don't want TPA if it's disabled in bp
2507 * or if this is an FCoE L2 ring.
2508 */
2509 if (bp->disable_tpa || IS_FCOE_FP(fp))
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002510 fp->mode = TPA_MODE_DISABLED;
Merav Sicron55c11942012-11-07 00:45:48 +00002511}

int bnx2x_load_cnic(struct bnx2x *bp)
{
	int i, rc, port = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");

	mutex_init(&bp->cnic_mutex);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem_cnic(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
		}
	}

	rc = bnx2x_alloc_fp_mem_cnic(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Update the number of queues with the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 1);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Add all CNIC NAPI objects */
	bnx2x_add_all_napi_cnic(bp);
	DP(NETIF_MSG_IFUP, "cnic napi added\n");
	bnx2x_napi_enable_cnic(bp);

	rc = bnx2x_init_hw_func_cnic(bp);
	if (rc)
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);

	bnx2x_nic_init_cnic(bp);

	if (IS_PF(bp)) {
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);

		/* setup cnic queues */
		for_each_cnic_queue(bp, i) {
			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error_cnic2);
			}
		}
	}

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_setup_cnic_info(bp);
	bp->cnic_loaded = true;
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);

	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error_cnic2:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

load_error_cnic1:
	bnx2x_napi_disable_cnic(bp);
	/* Update the number of queues without the cnic queues */
	if (bnx2x_set_real_num_queues(bp, 0))
		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
	BNX2X_ERR("CNIC-related load failed\n");
	bnx2x_free_fp_mem_cnic(bp);
	bnx2x_free_mem_cnic(bp);
	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0, load_code = 0;

	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
	DP(NETIF_MSG_IFUP,
	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* zero the structure w/o any lock, before SP handler is initialized */
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);

	if (IS_PF(bp))
		/* must be called before memory allocation and HW init */
		bnx2x_ilt_set_info(bp);

	/* Zero fastpath structures while preserving the invariants that are
	 * allocated only once: napi, fp index, max_cos and the bp pointer.
	 * Also set fp->mode and txdata_ptr.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);
	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
				  bp->num_cnic_queues) *
				  sizeof(struct bnx2x_fp_txdata));

	bp->fcoe_init = false;

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory\n");
			return rc;
		}
	}

	/* Needs to be done after alloc mem, since it self-adjusts to the
	 * amount of memory available for RSS queues
	 */
	rc = bnx2x_alloc_fp_mem(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for fps\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* Allocate memory for FW statistics */
	if (bnx2x_alloc_fw_stats_mem(bp))
		LOAD_ERROR_EXIT(bp, load_error0);

	/* request pf to initialize status blocks */
	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_init(bp);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it. At this stage cnic queues are not counted.
	 */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);
	DP(NETIF_MSG_IFUP, "napi added\n");
	bnx2x_napi_enable(bp);

	if (IS_PF(bp)) {
		/* set pf load just before approaching the MCP */
		bnx2x_set_pf_load(bp);

		/* if mcp exists send load request and analyze response */
		if (!BP_NOMCP(bp)) {
			/* attempt to load pf */
			rc = bnx2x_nic_load_request(bp, &load_code);
			if (rc)
				LOAD_ERROR_EXIT(bp, load_error1);

			/* what did mcp say? */
			rc = bnx2x_compare_fw_ver(bp, load_code, true);
			if (rc) {
				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		} else {
			load_code = bnx2x_nic_load_no_mcp(bp, port);
		}

		/* mark pmf if applicable */
		bnx2x_nic_load_pmf(bp, load_code);

		/* Init Function state controlling object */
		bnx2x__init_func_obj(bp);

		/* Initialize HW */
		rc = bnx2x_init_hw(bp, load_code);
		if (rc) {
			BNX2X_ERR("HW init failed, aborting\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
			LOAD_ERROR_EXIT(bp, load_error2);
		}
	}

	bnx2x_pre_irq_nic_init(bp);

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("setup irqs failed\n");
		if (IS_PF(bp))
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Init per-function objects */
	if (IS_PF(bp)) {
		/* Setup NIC internals and enable interrupts */
		bnx2x_post_irq_nic_init(bp, load_code);

		bnx2x_init_bp_objs(bp);
		bnx2x_iov_nic_init(bp);

		/* Set AFEX default VLAN tag to an invalid value */
		bp->afex_def_vlan_tag = -1;
		bnx2x_nic_load_afex_dcc(bp, load_code);
		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
		rc = bnx2x_func_start(bp);
		if (rc) {
			BNX2X_ERR("Function start failed!\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* Send LOAD_DONE command to MCP */
		if (!BP_NOMCP(bp)) {
			load_code = bnx2x_fw_command(bp,
						     DRV_MSG_CODE_LOAD_DONE, 0);
			if (!load_code) {
				BNX2X_ERR("MCP response failure, aborting\n");
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* initialize FW coalescing state machines in RAM */
		bnx2x_update_coalesce(bp);
	}

	/* setup the leading queue */
	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* set up the rest of the queues */
	for_each_nondefault_eth_queue(bp, i) {
		if (IS_PF(bp))
			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
		else /* VF */
			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
		if (rc) {
			BNX2X_ERR("Queue %d setup failed\n", i);
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	/* setup rss */
	rc = bnx2x_init_rss(bp);
	if (rc) {
		BNX2X_ERR("PF RSS init failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Now that clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	if (IS_PF(bp))
		rc = bnx2x_set_eth_mac(bp, true);
	else /* vf */
		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
					   true);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	if (IS_PF(bp) && bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf) {
		rc = bnx2x_initial_phy_init(bp, load_mode);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error3);
	}
	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;

	/* Start fast path */

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	if (bp->flags & PTP_SUPPORTED) {
		bnx2x_init_ptp(bp);
		bnx2x_configure_ptp_filters(bp);
	}
	/* Start Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should only be re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_atomic();
		break;

	case LOAD_DIAG:
	case LOAD_LOOPBACK_EXT:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	if (CNIC_ENABLED(bp))
		bnx2x_load_cnic(bp);

	if (IS_PF(bp))
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		/* mark driver is loaded in shmem2 */
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
		return -EBUSY;
	}

	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
		bnx2x_dcbx_init(bp, false);

	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error3:
	if (IS_PF(bp)) {
		bnx2x_int_disable_sync(bp, 1);

		/* Clean queueable objects */
		bnx2x_squeeze_objects(bp);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	bnx2x_del_all_napi(bp);

	/* clear pf_load status, as it was already set */
	if (IS_PF(bp))
		bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_fw_stats_mem(bp);
	bnx2x_free_fp_mem(bp);
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}

int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
	u8 rc = 0, cos, i;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
		if (rc)
			return rc;
	}
	return 0;
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
	int i;
	bool global = false;

	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");

	/* mark driver is unloaded in shmem2 */
	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
	    (bp->state == BNX2X_STATE_CLOSED ||
	     bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/* Nothing to do during unload if the previous bnx2x_nic_load()
	 * has not completed successfully - all resources are released.
	 *
	 * we can get here only after unsuccessful ndo_* callback, during which
	 * dev->IFF_UP flag is still on.
	 */
	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
		return 0;

	/* It's important to set bp->state to a value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* indicate to VFs that the PF is going down */
	bnx2x_iov_channel_down(bp);

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	netdev_reset_tc(bp->dev);

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	if (IS_PF(bp)) {
		/* Set ALWAYS_ALIVE bit in shmem */
		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
		bnx2x_drv_pulse(bp);
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_save_statistics(bp);
	}

	/* wait until consumers catch up with producers in all queues */
	bnx2x_drain_tx_queues(bp);

	/* if VF, indicate to the PF that this function is going down (the PF
	 * will delete sp elements and clear initializations)
	 */
	if (IS_VF(bp))
		bnx2x_vfpf_close_vf(bp);
	else if (unload_mode != UNLOAD_RECOVERY)
		/* if this is a normal/close unload need to clean up chip */
		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/* Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine whose leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);
		/* Delete all NAPI objects */
		bnx2x_del_all_napi(bp);
		if (CNIC_LOADED(bp))
			bnx2x_del_all_napi_cnic(bp);
		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, false);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	if (IS_PF(bp))
		bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* clear pending work in rtnl task */
	bp->sp_rtnl_state = 0;
	smp_mb();

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_skbs_cnic(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_fp_mem(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_fp_mem_cnic(bp);

	if (IS_PF(bp)) {
		if (CNIC_LOADED(bp))
			bnx2x_free_mem_cnic(bp);
	}
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;
	bp->cnic_loaded = false;

	/* Clear driver version indication in shmem */
	if (IS_PF(bp))
		bnx2x_update_mng_version(bp);

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (IS_PF(bp) &&
	    !bnx2x_clear_pf_load(bp) &&
	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");

	return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pdev->pm_cap) {
		BNX2X_DEV_INFO("No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif
		if (!bnx2x_fp_lock_napi(fp))
			return budget;

		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget) {
				bnx2x_fp_unlock_napi(fp);
				break;
			}
		}

		bnx2x_fp_unlock_napi(fp);

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_RX_STATUS,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
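
/* Condensed restatement of the ordering comment above (illustrative,
 * no new semantics): without the rmb() the CPU may defer the
 * status-block read in bnx2x_update_fpsb_idx() until just before
 * bnx2x_ack_sb(). If the device DMAs a new index right after the
 * has_rx_work()/has_tx_work() re-check, the deferred read picks up
 * that newer index and ack_sb() hands it to the IGU as already
 * handled - so no further interrupt fires while the work that index
 * announced is still pending.
 */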

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int found = 0;

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR) ||
	    (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
		return LL_FLUSH_FAILED;

	if (!bnx2x_fp_lock_poll(fp))
		return LL_FLUSH_BUSY;

	if (bnx2x_has_rx_work(fp))
		found = bnx2x_rx_int(fp, 4);

	bnx2x_fp_unlock_poll(fp);

	return found;
}
#endif

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 */
static u16 bnx2x_tx_split(struct bnx2x *bp,
			  struct bnx2x_fp_txdata *txdata,
			  struct sw_tx_bd *tx_buf,
			  struct eth_tx_start_bd **tx_bd, u16 hlen,
			  u16 bd_prod)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
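
/* Illustrative sketch (not part of the driver): the BD layout that
 * bnx2x_tx_split() produces. Assume a TSO skb whose linear part is
 * 1514 bytes with 54 bytes of headers (byte counts invented for the
 * example), sharing one DMA mapping:
 *
 *   before:  start BD -> [ dma_addr,      nbytes = 1514 ]
 *   after:   start BD -> [ dma_addr,      nbytes = 54   ]  headers
 *            data  BD -> [ dma_addr + 54, nbytes = 1460 ]  payload
 *
 * Only the first BD owns the mapping; BNX2X_TSO_SPLIT_BD tells the
 * completion path not to unmap the second BD separately.
 */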

#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	__sum16 tsum = (__force __sum16) csum;

	if (fix > 0)
		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
				  csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		tsum = ~csum_fold(csum_add((__force __wsum) csum,
				  csum_partial(t_header, -fix, 0)));

	return bswab16(tsum);
}

static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;
	__u8 prot = 0;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IPV6)) {
		rc = XMIT_CSUM_V6;
		prot = ipv6_hdr(skb)->nexthdr;
	} else {
		rc = XMIT_CSUM_V4;
		prot = ip_hdr(skb)->protocol;
	}

	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
		if (inner_ip_hdr(skb)->version == 6) {
			rc |= XMIT_CSUM_ENC_V6;
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		} else {
			rc |= XMIT_CSUM_ENC_V4;
			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}
	if (prot == IPPROTO_TCP)
		rc |= XMIT_CSUM_TCP;

	if (skb_is_gso(skb)) {
		if (skb_is_gso_v6(skb)) {
			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V6;
		} else {
			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V4;
		}
	}

	return rc;
}
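
/* Worked example (hypothetical packet): a tunnel-encapsulated TCPv4
 * GSO skb on a 57712/578xx part - outer IPv4/UDP, inner IPv4/TCP -
 * would come out as
 *
 *   XMIT_CSUM_V4 | XMIT_CSUM_ENC_V4 | XMIT_CSUM_TCP |
 *   XMIT_GSO_V4 | XMIT_GSO_ENC_V4
 *
 * assuming XMIT_CSUM_ENC is defined to cover both the _ENC_V4 and
 * _ENC_V6 bits.
 */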

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			if (xmit_type & XMIT_GSO_ENC)
				hlen = (int)(skb_inner_transport_header(skb) -
					     skb->data) +
					     inner_tcp_hdrlen(skb);
			else
				hlen = (int)(skb_transport_header(skb) -
					     skb->data) + tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* a non-LSO packet that is too fragmented should
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
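
/* Worked example (MAX_FETCH_BD value assumed for illustration): with
 * MAX_FETCH_BD = 13 the window size is 10, so every run of 10
 * consecutive BDs must carry at least gso_size bytes between them.
 * A 12-frag LSO skb with gso_size 1460 in which ten consecutive
 * 100-byte frags appear (1000 < 1460) fails the check above and is
 * linearized by the caller, since - as the header comment implies -
 * the firmware builds each MSS-sized frame from a bounded run of BDs.
 */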

/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static void bnx2x_set_pbd_gso(struct sk_buff *skb,
			      struct eth_tx_parse_bd_e1x *pbd,
			      u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = bswab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0));
	} else {
		pbd->tcp_pseudo_csum =
			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0));
	}

	pbd->global_data |=
		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
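
/* Interpretive note (an assumption about the firmware contract): the
 * pseudo-header checksum above is deliberately computed with a zero
 * length, and ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN flags that,
 * since the firmware folds the per-segment TCP length into the
 * checksum itself when it slices the LSO frame into MSS-sized
 * segments.
 */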

/**
 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related, when skb has encapsulation
 */
static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
				 u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data;
	}

	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_inner_transport_header(skb) +
		sizeof(struct udphdr) - skb->data;
}

/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related
 */
static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	}
	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
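
/* Worked example (assumed frame layout): for an untagged IPv4/TCP skb
 * with a 14-byte Ethernet header, a 20-byte IP header and a 20-byte
 * TCP header, the L4 start offset written into parsing_data is
 * 34 / 2 = 17 (the field is counted in 16-bit words) and the TCP
 * header length is 20 / 4 = 5 (counted in 32-bit dwords); the
 * function returns 14 + 20 + 20 = 54, the byte length of all headers.
 */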

/* set FW indication according to inner or outer protocols if tunneled */
static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			       struct eth_tx_start_bd *tx_start_bd,
			       u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}

/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			     struct eth_tx_parse_bd_e1x *pbd,
			     u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		cpu_to_le16(hlen |
			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			  skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
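
/* Worked example (assumed frame layout): for an untagged IPv4/TCP skb
 * the E1x parse BD is filled in 16-bit words - the Ethernet header
 * contributes 14 / 2 = 7, ip_hlen_w is 20 / 2 = 10 and the TCP header
 * adds 20 / 2 = 10, so total_hlen_w is 27 and the function returns
 * the byte count, 27 * 2 = 54.
 */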

static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
				      struct eth_tx_parse_bd_e2 *pbd_e2,
				      struct eth_tx_parse_2nd_bd *pbd2,
				      u16 *global_data,
				      u32 xmit_type)
{
	u16 hlen_w = 0;
	u8 outerip_off, outerip_len = 0;

	/* from outer IP to transport */
	hlen_w = (skb_inner_transport_header(skb) -
		  skb_network_header(skb)) >> 1;

	/* transport len */
	hlen_w += inner_tcp_hdrlen(skb) >> 1;

	pbd2->fw_ip_hdr_to_payload_w = hlen_w;

	/* outer IP header info */
	if (xmit_type & XMIT_CSUM_V4) {
		struct iphdr *iph = ip_hdr(skb);
		u32 csum = (__force u32)(~iph->check) -
			   (__force u32)iph->tot_len -
			   (__force u32)iph->frag_off;

		outerip_len = iph->ihl << 1;

		pbd2->fw_ip_csum_wo_len_flags_frag =
			bswab16(csum_fold((__force __wsum)csum));
	} else {
		pbd2->fw_ip_hdr_to_payload_w =
			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
		pbd_e2->data.tunnel_data.flags |=
			ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
	}

	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);

	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));

	/* inner IP header info */
	if (xmit_type & XMIT_CSUM_ENC_V4) {
		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);

		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_tcpudp_magic(
					inner_ip_hdr(skb)->saddr,
					inner_ip_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	} else {
		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_ipv6_magic(
					&inner_ipv6_hdr(skb)->saddr,
					&inner_ipv6_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	}

	outerip_off = (skb_network_header(skb) - skb->data) >> 1;

	*global_data |=
		outerip_off |
		(outerip_len <<
			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);

	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
	}
}
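
/* Note on the outer IPv4 arithmetic above (an interpretation, not
 * from a spec): subtracting tot_len and frag_off from the
 * complemented header checksum leaves a checksum over the remaining,
 * per-segment-invariant header fields, so the firmware can produce a
 * valid outer IP checksum after it rewrites the length and fragment
 * fields of each LSO segment it emits.
 */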

static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
					 u32 xmit_type)
{
	struct ipv6hdr *ipv6;

	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
		return;

	if (xmit_type & XMIT_GSO_ENC_V6)
		ipv6 = inner_ipv6_hdr(skb);
	else /* XMIT_GSO_V6 */
		ipv6 = ipv6_hdr(skb);

	if (ipv6->nexthdr == NEXTHDR_IPV6)
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));

	txdata = &bp->bnx2x_txq[txq_index];

	/* enable this debug print to view the transmission queue being used
	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* enable this debug print to view the transmission details
	DP(NETIF_MSG_TX_QUEUED,
	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     skb_shinfo(skb)->nr_frags +
		     BDS_PER_TX_PKT +
		     NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
		/* Handle special storage cases separately */
		if (txdata->tx_ring_size == 0) {
			struct bnx2x_eth_q_stats *q_stats =
				bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED,
	   "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3807 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003808
3809 eth = (struct ethhdr *)skb->data;
3810
3811 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3812 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3813 if (is_broadcast_ether_addr(eth->h_dest))
3814 mac_type = BROADCAST_ADDRESS;
3815 else
3816 mac_type = MULTICAST_ADDRESS;
3817 }
3818
Dmitry Kravkov91226792013-03-11 05:17:52 +00003819#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003820 /* First, check if we need to linearize the skb (due to FW
3821 restrictions). No need to check fragmentation if page size > 8K
3822 (there will be no violation of FW restrictions) */
3823 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3824 /* Statistics of linearization */
3825 bp->lin_cnt++;
3826 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003827 DP(NETIF_MSG_TX_QUEUED,
3828 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003829 dev_kfree_skb_any(skb);
3830 return NETDEV_TX_OK;
3831 }
3832 }
3833#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003834 /* Map skb linear data for DMA */
3835 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3836 skb_headlen(skb), DMA_TO_DEVICE);
3837 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003838 DP(NETIF_MSG_TX_QUEUED,
3839 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003840 dev_kfree_skb_any(skb);
3841 return NETDEV_TX_OK;
3842 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003843 /*
3844 Please read carefully. First we use one BD which we mark as start,
3845 then we have a parsing info BD (used for TSO or xsum),
3846 and only then we have the rest of the TSO BDs.
3847 (don't forget to mark the last one as last,
3848 and to unmap only AFTER you write to the BD ...)
3849 And above all, all PBD sizes are in words - NOT DWORDS!
3850 */
3851
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003852 /* get current pkt produced now - advance it just before sending packet
3853 * since mapping of pages may fail and cause packet to be dropped
3854 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003855 pkt_prod = txdata->tx_pkt_prod;
3856 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003857
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003858 /* get a tx_buf and first BD
3859 * tx_start_bd may be changed during SPLIT,
3860 * but first_bd will always stay first
3861 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003862 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3863 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003864 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003865
3866 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003867
Michal Kalderoneeed0182014-08-17 16:47:44 +03003868 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3869 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3870 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3871 } else if (bp->ptp_tx_skb) {
3872 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3873 } else {
3874 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3875 /* schedule check for Tx timestamp */
3876 bp->ptp_tx_skb = skb_get(skb);
3877 bp->ptp_tx_start = jiffies;
3878 schedule_work(&bp->ptp_task);
3879 }
3880 }
3881
Dmitry Kravkov91226792013-03-11 05:17:52 +00003882 /* header nbd: indirectly zero other flags! */
3883 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003884
3885 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003886 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003887 tx_buf->skb = skb;
3888 tx_buf->flags = 0;
3889
3890 DP(NETIF_MSG_TX_QUEUED,
3891 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003892 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003893
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003894 if (skb_vlan_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003895 tx_start_bd->vlan_or_ethertype =
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003896 cpu_to_le16(skb_vlan_tag_get(skb));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003897 tx_start_bd->bd_flags.as_bitfield |=
3898 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003899 } else {
3900 /* when transmitting in a vf, start bd must hold the ethertype
3901 * for fw to enforce it
3902 */
Yuval Mintzea36475a2014-08-25 17:48:30 +03003903#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov91226792013-03-11 05:17:52 +00003904 if (IS_VF(bp))
Yuval Mintzea36475a2014-08-25 17:48:30 +03003905#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003906 tx_start_bd->vlan_or_ethertype =
3907 cpu_to_le16(ntohs(eth->h_proto));
Yuval Mintzea36475a2014-08-25 17:48:30 +03003908#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov91226792013-03-11 05:17:52 +00003909 else
Ariel Eliordc1ba592013-01-01 05:22:30 +00003910 /* used by FW for packet accounting */
3911 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Yuval Mintzea36475a2014-08-25 17:48:30 +03003912#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003913 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003914
Dmitry Kravkov91226792013-03-11 05:17:52 +00003915 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3916
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003917 /* turn on parsing and get a BD */
3918 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003919
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003920 if (xmit_type & XMIT_CSUM)
3921 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003922
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003923 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003924 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003925 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003926
3927 if (xmit_type & XMIT_CSUM_ENC) {
3928 u16 global_data = 0;
3929
3930 /* Set PBD in enc checksum offload case */
3931 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3932 &pbd_e2_parsing_data,
3933 xmit_type);
3934
3935 /* turn on 2nd parsing and get a BD */
3936 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3937
3938 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3939
3940 memset(pbd2, 0, sizeof(*pbd2));
3941
3942 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3943 (skb_inner_network_header(skb) -
3944 skb->data) >> 1;
3945
3946 if (xmit_type & XMIT_GSO_ENC)
3947 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3948 &global_data,
3949 xmit_type);
3950
3951 pbd2->global_data = cpu_to_le16(global_data);
3952
3953 /* add an additional parsing BD indication to the start BD */
3954 SET_FLAG(tx_start_bd->general_data,
3955 ETH_TX_START_BD_PARSE_NBDS, 1);
3956 /* set encapsulation flag in start BD */
3957 SET_FLAG(tx_start_bd->general_data,
3958 ETH_TX_START_BD_TUNNEL_EXIST, 1);
Dmitry Kravkovfe26566d2014-07-24 18:54:47 +03003959
3960 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3961
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003962 nbd++;
3963 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00003964 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003965 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3966 &pbd_e2_parsing_data,
3967 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003968 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00003969
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003970 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
Yuval Mintzbabe7232014-02-27 15:42:26 +02003971 /* Add the macs to the parsing BD if this is a vf or if
3972 * Tx Switching is enabled.
3973 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003974 if (IS_VF(bp)) {
3975 /* override GRE parameters in BD */
3976 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3977 &pbd_e2->data.mac_addr.src_mid,
3978 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003979 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003980
3981 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3982 &pbd_e2->data.mac_addr.dst_mid,
3983 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003984 eth->h_dest);
Yuval Mintzea36475a2014-08-25 17:48:30 +03003985 } else {
3986 if (bp->flags & TX_SWITCHING)
3987 bnx2x_set_fw_mac_addr(
3988 &pbd_e2->data.mac_addr.dst_hi,
3989 &pbd_e2->data.mac_addr.dst_mid,
3990 &pbd_e2->data.mac_addr.dst_lo,
3991 eth->h_dest);
3992#ifdef BNX2X_STOP_ON_ERROR
3993 /* Enforce security is always set in Stop on Error -
3994 * source mac should be present in the parsing BD
3995 */
3996 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3997 &pbd_e2->data.mac_addr.src_mid,
3998 &pbd_e2->data.mac_addr.src_lo,
3999 eth->h_source);
4000#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004001 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004002
4003 SET_FLAG(pbd_e2_parsing_data,
4004 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004005 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004006 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004007 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004008 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4009 /* Set PBD in checksum offload case */
4010 if (xmit_type & XMIT_CSUM)
4011 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004012
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004013 SET_FLAG(global_data,
4014 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4015 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004016 }
4017
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004018 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004019 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4020 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004021 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4022 pkt_size = tx_start_bd->nbytes;
4023
Merav Sicron51c1a582012-03-18 10:33:38 +00004024 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004025 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004026 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004027 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004028 tx_start_bd->bd_flags.as_bitfield,
4029 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004030
4031 if (xmit_type & XMIT_GSO) {
4032
4033 DP(NETIF_MSG_TX_QUEUED,
4034 "TSO packet len %d hlen %d total len %d tso size %d\n",
4035 skb->len, hlen, skb_headlen(skb),
4036 skb_shinfo(skb)->gso_size);
4037
4038 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4039
Dmitry Kravkov91226792013-03-11 05:17:52 +00004040 if (unlikely(skb_headlen(skb) > hlen)) {
4041 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004042 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4043 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004044 bd_prod);
4045 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004046 if (!CHIP_IS_E1x(bp))
Dmitry Kravkove42780b2014-08-17 16:47:43 +03004047 pbd_e2_parsing_data |=
4048 (skb_shinfo(skb)->gso_size <<
4049 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4050 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004051 else
Dmitry Kravkove42780b2014-08-17 16:47:43 +03004052 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004053 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00004054
4055 /* Set the PBD's parsing_data field if not zero
4056 * (for the chips newer than 57711).
4057 */
4058 if (pbd_e2_parsing_data)
4059 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4060
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004061 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4062
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004063 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004064 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4065 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4066
Eric Dumazet9e903e02011-10-18 21:00:24 +00004067 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4068 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004069 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00004070 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004071
Merav Sicron51c1a582012-03-18 10:33:38 +00004072 DP(NETIF_MSG_TX_QUEUED,
4073 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004074
4075 /* we need unmap all buffers already mapped
4076 * for this SKB;
4077 * first_bd->nbd need to be properly updated
4078 * before call to bnx2x_free_tx_pkt
4079 */
4080 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004081 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00004082 TX_BD(txdata->tx_pkt_prod),
4083 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004084 return NETDEV_TX_OK;
4085 }
4086
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004087 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00004088 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004089 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00004090 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004091
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004092 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4093 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00004094 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4095 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004096 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004097
4098 DP(NETIF_MSG_TX_QUEUED,
4099 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4100 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4101 le16_to_cpu(tx_data_bd->nbytes));
4102 }
4103
4104 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4105
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004106 /* update with actual num BDs */
4107 first_bd->nbd = cpu_to_le16(nbd);
4108
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4110
4111 /* now send a tx doorbell, counting the next BD
4112 * if the packet contains or ends with it
4113 */
4114 if (TX_BD_POFF(bd_prod) < nbd)
4115 nbd++;
4116
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004117 /* total_pkt_bytes should be set on the first data BD if
4118 * it's not an LSO packet and there is more than one
4119 * data BD. In this case pkt_size is limited by an MTU value.
4120 * However we prefer to set it for an LSO packet as well (while we
4121 * don't have to) in order to save some CPU cycles in the non-LSO
4122 * case, where those cycles matter much more.
4123 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004124 if (total_pkt_bd != NULL)
4125 total_pkt_bd->total_pkt_bytes = pkt_size;
4126
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004127 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004128 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00004129 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004130 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4131 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4132 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4133 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004134 if (pbd_e2)
4135 DP(NETIF_MSG_TX_QUEUED,
4136 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
Dmitry Kravkov91226792013-03-11 05:17:52 +00004137 pbd_e2,
4138 pbd_e2->data.mac_addr.dst_hi,
4139 pbd_e2->data.mac_addr.dst_mid,
4140 pbd_e2->data.mac_addr.dst_lo,
4141 pbd_e2->data.mac_addr.src_hi,
4142 pbd_e2->data.mac_addr.src_mid,
4143 pbd_e2->data.mac_addr.src_lo,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004144 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004145 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4146
Tom Herbert2df1a702011-11-28 16:33:37 +00004147 netdev_tx_sent_queue(txq, skb->len);
4148
Willem de Bruijn8373c572012-04-27 09:04:06 +00004149 skb_tx_timestamp(skb);
4150
Ariel Elior6383c0b2011-07-14 08:31:57 +00004151 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004152 /*
4153 * Make sure that the BD data is updated before updating the producer
4154 * since FW might read the BD right after the producer is updated.
4155 * This is only applicable for weak-ordered memory model archs such
4156 * as IA-64. The following barrier is also mandatory since FW
4157 * assumes packets must have BDs.
4158 */
4159 wmb();
4160
Ariel Elior6383c0b2011-07-14 08:31:57 +00004161 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004162 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004163
Ariel Elior6383c0b2011-07-14 08:31:57 +00004164 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004165
4166 mmiowb();
4167
Ariel Elior6383c0b2011-07-14 08:31:57 +00004168 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004169
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004170 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004171 netif_tx_stop_queue(txq);
4172
4173 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4174 * ordering of set_bit() in netif_tx_stop_queue() and read of
4175 * fp->bd_tx_cons */
4176 smp_mb();
4177
Barak Witkowski15192a82012-06-19 07:48:28 +00004178 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004179 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004180 netif_tx_wake_queue(txq);
4181 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00004182 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004183
4184 return NETDEV_TX_OK;
4185}
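/* Hedged illustration: the stop/re-check/wake dance at the end of
 * bnx2x_start_xmit(), modeled with C11 atomics outside the kernel. The
 * second availability check after the full barrier closes the race
 * with a completion that freed descriptors between the first check and
 * the queue stop, mirroring the smp_mb() pairing with bnx2x_tx_int().
 * All names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct txq_model {
	atomic_int avail;		/* free descriptors */
	atomic_bool stopped;
};

static void tx_maybe_stop(struct txq_model *q, int needed)
{
	if (atomic_load(&q->avail) >= needed)
		return;

	atomic_store(&q->stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with completer */

	/* Re-check: a completion may have freed space meanwhile. */
	if (atomic_load(&q->avail) >= needed)
		atomic_store(&q->stopped, false);
}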
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004186
Ariel Elior6383c0b2011-07-14 08:31:57 +00004187/**
4188 * bnx2x_setup_tc - routine to configure net_device for multi tc
4189 *
4190 * @dev: net device to configure
4191 * @num_tc: number of traffic classes to enable
4192 *
4193 * callback connected to the ndo_setup_tc function pointer
4194 */
4195int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4196{
4197 int cos, prio, count, offset;
4198 struct bnx2x *bp = netdev_priv(dev);
4199
4200 /* setup tc must be called under rtnl lock */
4201 ASSERT_RTNL();
4202
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004203 /* no traffic classes requested. Aborting */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004204 if (!num_tc) {
4205 netdev_reset_tc(dev);
4206 return 0;
4207 }
4208
4209 /* requested to support too many traffic classes */
4210 if (num_tc > bp->max_cos) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00004211 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004212 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004213 return -EINVAL;
4214 }
4215
4216 /* declare amount of supported traffic classes */
4217 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004218 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004219 return -EINVAL;
4220 }
4221
4222 /* configure priority to traffic class mapping */
4223 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4224 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00004225 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4226 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004227 prio, bp->prio_to_cos[prio]);
4228 }
4229
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004230 /* Use this configuration to differentiate tc0 from other COSes
Ariel Elior6383c0b2011-07-14 08:31:57 +00004231 This can be used for ETS or PFC, and saves the effort of setting
4232 up a multi-class queue disc or negotiating DCBX with a switch
4233 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00004234 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004235 for (prio = 1; prio < 16; prio++) {
4236 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00004237 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004238 } */
4239
4240 /* configure traffic class to transmission queue mapping */
4241 for (cos = 0; cos < bp->max_cos; cos++) {
4242 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00004243 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004244 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00004245 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4246 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004247 cos, offset, count);
4248 }
4249
4250 return 0;
4251}
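/* Hedged illustration: the class-to-queue layout programmed above.
 * Each traffic class owns a contiguous block of the RSS queues, so the
 * real Tx queue index is tc * count + rss_index. Standalone sketch
 * with hypothetical numbers:
 */
#include <stdio.h>

int main(void)
{
	int num_tc = 3, count = 4;	/* e.g. 3 classes over 4 RSS queues */

	for (int cos = 0; cos < num_tc; cos++)
		printf("tc %d -> queues [%d..%d]\n",
		       cos, cos * count, (cos + 1) * count - 1);
	return 0;
}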
4252
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004253/* called with rtnl_lock */
4254int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4255{
4256 struct sockaddr *addr = p;
4257 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004258 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004259
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004260 if (!is_valid_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004261 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004262 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004263 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004264
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004265 if (IS_MF_STORAGE_ONLY(bp)) {
4266 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004267 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004268 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004269
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004270 if (netif_running(dev)) {
4271 rc = bnx2x_set_eth_mac(bp, false);
4272 if (rc)
4273 return rc;
4274 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004275
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004276 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4277
4278 if (netif_running(dev))
4279 rc = bnx2x_set_eth_mac(bp, true);
4280
4281 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004282}
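/* Hedged illustration: what is_valid_ether_addr() rejects above - a
 * usable unicast MAC must not have the multicast/group bit set and
 * must not be all zeros. Standalone equivalent:
 */
#include <stdbool.h>
#include <stdint.h>

static bool mac_is_valid_unicast(const uint8_t mac[6])
{
	bool all_zero = true;

	for (int i = 0; i < 6; i++)
		if (mac[i])
			all_zero = false;

	return !(mac[0] & 0x01) && !all_zero;	/* not multicast, not zero */
}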
4283
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004284static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4285{
4286 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4287 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004288 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004289
4290 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00004291
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004292 if (IS_FCOE_IDX(fp_index)) {
4293 memset(sb, 0, sizeof(union host_hc_status_block));
4294 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004295 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004296 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004297 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004298 BNX2X_PCI_FREE(sb->e2_sb,
4299 bnx2x_fp(bp, fp_index,
4300 status_blk_mapping),
4301 sizeof(struct host_hc_status_block_e2));
4302 else
4303 BNX2X_PCI_FREE(sb->e1x_sb,
4304 bnx2x_fp(bp, fp_index,
4305 status_blk_mapping),
4306 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004307 }
Merav Sicron55c11942012-11-07 00:45:48 +00004308
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004309 /* Rx */
4310 if (!skip_rx_queue(bp, fp_index)) {
4311 bnx2x_free_rx_bds(fp);
4312
4313 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4314 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4315 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4316 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4317 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4318
4319 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4320 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4321 sizeof(struct eth_fast_path_rx_cqe) *
4322 NUM_RCQ_BD);
4323
4324 /* SGE ring */
4325 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4326 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4327 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4328 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4329 }
4330
4331 /* Tx */
4332 if (!skip_tx_queue(bp, fp_index)) {
4333 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004334 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004335 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004336
Merav Sicron51c1a582012-03-18 10:33:38 +00004337 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00004338 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004339 fp_index, cos, txdata->cid);
4340
4341 BNX2X_FREE(txdata->tx_buf_ring);
4342 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4343 txdata->tx_desc_mapping,
4344 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4345 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004346 }
4347 /* end of fastpath */
4348}
4349
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004350static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004351{
4352 int i;
4353 for_each_cnic_queue(bp, i)
4354 bnx2x_free_fp_mem_at(bp, i);
4355}
4356
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004357void bnx2x_free_fp_mem(struct bnx2x *bp)
4358{
4359 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00004360 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004361 bnx2x_free_fp_mem_at(bp, i);
4362}
4363
Eric Dumazet1191cb82012-04-27 21:39:21 +00004364static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004365{
4366 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004367 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004368 bnx2x_fp(bp, index, sb_index_values) =
4369 (__le16 *)status_blk.e2_sb->sb.index_values;
4370 bnx2x_fp(bp, index, sb_running_index) =
4371 (__le16 *)status_blk.e2_sb->sb.running_index;
4372 } else {
4373 bnx2x_fp(bp, index, sb_index_values) =
4374 (__le16 *)status_blk.e1x_sb->sb.index_values;
4375 bnx2x_fp(bp, index, sb_running_index) =
4376 (__le16 *)status_blk.e1x_sb->sb.running_index;
4377 }
4378}
4379
Eric Dumazet1191cb82012-04-27 21:39:21 +00004380/* Returns the number of actually allocated BDs */
4381static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4382 int rx_ring_size)
4383{
4384 struct bnx2x *bp = fp->bp;
4385 u16 ring_prod, cqe_ring_prod;
4386 int i, failure_cnt = 0;
4387
4388 fp->rx_comp_cons = 0;
4389 cqe_ring_prod = ring_prod = 0;
4390
4391 /* This routine is called only during fp init so
4392 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4393 */
4394 for (i = 0; i < rx_ring_size; i++) {
Michal Schmidt996dedb2013-09-05 22:13:09 +02004395 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
Eric Dumazet1191cb82012-04-27 21:39:21 +00004396 failure_cnt++;
4397 continue;
4398 }
4399 ring_prod = NEXT_RX_IDX(ring_prod);
4400 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4401 WARN_ON(ring_prod <= (i - failure_cnt));
4402 }
4403
4404 if (failure_cnt)
4405 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4406 i - failure_cnt, fp->index);
4407
4408 fp->rx_bd_prod = ring_prod;
4409 /* Limit the CQE producer by the CQE ring size */
4410 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4411 cqe_ring_prod);
4412 fp->rx_pkt = fp->rx_calls = 0;
4413
Barak Witkowski15192a82012-06-19 07:48:28 +00004414 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004415
4416 return i - failure_cnt;
4417}
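/* Hedged illustration: the fill loop above in isolation. The producer
 * advances only past slots whose buffer allocation succeeded, so
 * failures leave no holes - they are merely counted and the ring ends
 * up shorter. alloc_one() is a hypothetical stand-in for
 * bnx2x_alloc_rx_data(), and the plain increment stands in for
 * NEXT_RX_IDX(), which also skips page-boundary descriptors.
 */
#include <stdbool.h>

static int fill_ring(bool (*alloc_one)(int slot), int rx_ring_size)
{
	int prod = 0, failure_cnt = 0;

	for (int i = 0; i < rx_ring_size; i++) {
		if (!alloc_one(prod)) {
			failure_cnt++;
			continue;	/* same slot is retried next round */
		}
		prod++;
	}
	return rx_ring_size - failure_cnt;	/* BDs actually allocated */
}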
4418
4419static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4420{
4421 int i;
4422
4423 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4424 struct eth_rx_cqe_next_page *nextpg;
4425
4426 nextpg = (struct eth_rx_cqe_next_page *)
4427 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4428 nextpg->addr_hi =
4429 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4430 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4431 nextpg->addr_lo =
4432 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4434 }
4435}
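/* Hedged illustration: the chaining above makes the CQE pages a ring -
 * the last descriptor of page i carries the DMA address of page
 * (i % NUM_RCQ_RINGS), so the final page links back to page 0. Sketch
 * of the address arithmetic with hypothetical constants:
 */
#include <stdint.h>

#define N_PAGES		8
#define PAGE_BYTES	4096

static uint64_t next_page_addr(uint64_t ring_base, int i)
{
	/* i runs 1..N_PAGES, matching the loop above */
	return ring_base + (uint64_t)PAGE_BYTES * (i % N_PAGES);
}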
4436
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004437static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4438{
4439 union host_hc_status_block *sb;
4440 struct bnx2x_fastpath *fp = &bp->fp[index];
4441 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004442 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004443 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004444
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004445 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004446 rx_ring_size = MIN_RX_SIZE_NONTPA;
4447 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004448 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004449 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4450
Yuval Mintz065f8b92012-10-03 04:22:59 +00004451 if (CHIP_IS_E3(bp)) {
4452 u32 cfg = SHMEM_RD(bp,
4453 dev_info.port_hw_config[BP_PORT(bp)].
4454 default_cfg);
4455
4456 /* Decrease ring size for 1G functions */
4457 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4458 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4459 rx_ring_size /= 10;
4460 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004461
David S. Miller8decf862011-09-22 03:23:13 -04004462 /* allocate at least number of buffers required by FW */
4463 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4464 MIN_RX_SIZE_TPA, rx_ring_size);
4465
4466 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004467 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004468 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004469
Yuval Mintz04c46732013-01-23 03:21:46 +00004470 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4471
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004472 /* Common */
4473 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004474
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004475 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004476 /* status blocks */
Joe Perchescd2b0382014-02-20 13:25:51 -08004477 if (!CHIP_IS_E1x(bp)) {
4478 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4479 sizeof(struct host_hc_status_block_e2));
4480 if (!sb->e2_sb)
4481 goto alloc_mem_err;
4482 } else {
4483 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4484 sizeof(struct host_hc_status_block_e1x));
4485 if (!sb->e1x_sb)
4486 goto alloc_mem_err;
4487 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004488 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004489
4490 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4491 * set shortcuts for it.
4492 */
4493 if (!IS_FCOE_IDX(index))
4494 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004495
4496 /* Tx */
4497 if (!skip_tx_queue(bp, index)) {
4498 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004499 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004500 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004501
Merav Sicron51c1a582012-03-18 10:33:38 +00004502 DP(NETIF_MSG_IFUP,
4503 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004504 index, cos);
4505
Joe Perchescd2b0382014-02-20 13:25:51 -08004506 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4507 sizeof(struct sw_tx_bd),
4508 GFP_KERNEL);
4509 if (!txdata->tx_buf_ring)
4510 goto alloc_mem_err;
4511 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4512 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4513 if (!txdata->tx_desc_ring)
4514 goto alloc_mem_err;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004515 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004516 }
4517
4518 /* Rx */
4519 if (!skip_rx_queue(bp, index)) {
4520 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Joe Perchescd2b0382014-02-20 13:25:51 -08004521 bnx2x_fp(bp, index, rx_buf_ring) =
4522 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4523 if (!bnx2x_fp(bp, index, rx_buf_ring))
4524 goto alloc_mem_err;
4525 bnx2x_fp(bp, index, rx_desc_ring) =
4526 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4527 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4528 if (!bnx2x_fp(bp, index, rx_desc_ring))
4529 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004530
Dmitry Kravkov75b29452013-06-19 01:36:05 +03004531 /* Seed all CQEs by 1s */
Joe Perchescd2b0382014-02-20 13:25:51 -08004532 bnx2x_fp(bp, index, rx_comp_ring) =
4533 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4534 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4535 if (!bnx2x_fp(bp, index, rx_comp_ring))
4536 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004537
4538 /* SGE ring */
Joe Perchescd2b0382014-02-20 13:25:51 -08004539 bnx2x_fp(bp, index, rx_page_ring) =
4540 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4541 GFP_KERNEL);
4542 if (!bnx2x_fp(bp, index, rx_page_ring))
4543 goto alloc_mem_err;
4544 bnx2x_fp(bp, index, rx_sge_ring) =
4545 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4546 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4547 if (!bnx2x_fp(bp, index, rx_sge_ring))
4548 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004549 /* RX BD ring */
4550 bnx2x_set_next_page_rx_bd(fp);
4551
4552 /* CQ ring */
4553 bnx2x_set_next_page_rx_cq(fp);
4554
4555 /* BDs */
4556 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4557 if (ring_size < rx_ring_size)
4558 goto alloc_mem_err;
4559 }
4560
4561 return 0;
4562
4563/* handles low memory cases */
4564alloc_mem_err:
4565 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4566 index, ring_size);
4567 /* FW will drop all packets if the queue is not big enough.
4568 * In these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004569 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004570 */
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02004571 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004572 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004573 /* release memory allocated for this queue */
4574 bnx2x_free_fp_mem_at(bp, index);
4575 return -ENOMEM;
4576 }
4577 return 0;
4578}
4579
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004580static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004581{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004582 if (!NO_FCOE(bp))
4583 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004584 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004585 /* we will fail load process instead of mark
4586 * NO_FCOE_FLAG
4587 */
4588 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004589
4590 return 0;
4591}
4592
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004593static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004594{
4595 int i;
4596
4597 /* 1. Allocate FP for leading - fatal if error
4598 * 2. Allocate RSS - fix number of queues if error
4599 */
4600
4601 /* leading */
4602 if (bnx2x_alloc_fp_mem_at(bp, 0))
4603 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004604
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004605 /* RSS */
4606 for_each_nondefault_eth_queue(bp, i)
4607 if (bnx2x_alloc_fp_mem_at(bp, i))
4608 break;
4609
4610 /* handle memory failures */
4611 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4612 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4613
4614 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004615 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004616 if (CNIC_SUPPORT(bp))
4617 /* move non-eth FPs next to the last eth FP;
4618 * must be done in that order:
4619 * FCOE_IDX < FWD_IDX < OOO_IDX
4620 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004621
Merav Sicron55c11942012-11-07 00:45:48 +00004622 /* move FCoE fp even if NO_FCOE_FLAG is on */
4623 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4624 bp->num_ethernet_queues -= delta;
4625 bp->num_queues = bp->num_ethernet_queues +
4626 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004627 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4628 bp->num_queues + delta, bp->num_queues);
4629 }
4630
4631 return 0;
4632}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004633
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004634void bnx2x_free_mem_bp(struct bnx2x *bp)
4635{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004636 int i;
4637
4638 for (i = 0; i < bp->fp_array_size; i++)
4639 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004640 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004641 kfree(bp->sp_objs);
4642 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004643 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004644 kfree(bp->msix_table);
4645 kfree(bp->ilt);
4646}
4647
Bill Pemberton0329aba2012-12-03 09:24:24 -05004648int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004649{
4650 struct bnx2x_fastpath *fp;
4651 struct msix_entry *tbl;
4652 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004653 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004654 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004655 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004656
Ariel Elior6383c0b2011-07-14 08:31:57 +00004657 /*
4658 * The biggest MSI-X table we might need holds the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004659 * path IGU SBs plus default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004660 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004661 msix_table_size = bp->igu_sb_cnt;
4662 if (IS_PF(bp))
4663 msix_table_size++;
4664 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004665
4666 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004667 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004668 bp->fp_array_size = fp_array_size;
4669 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004670
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004671 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004672 if (!fp)
4673 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004674 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004675 fp[i].tpa_info =
4676 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4677 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4678 if (!(fp[i].tpa_info))
4679 goto alloc_err;
4680 }
4681
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004682 bp->fp = fp;
4683
Barak Witkowski15192a82012-06-19 07:48:28 +00004684 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004685 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004686 GFP_KERNEL);
4687 if (!bp->sp_objs)
4688 goto alloc_err;
4689
4690 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004691 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004692 GFP_KERNEL);
4693 if (!bp->fp_stats)
4694 goto alloc_err;
4695
Merav Sicron65565882012-06-19 07:48:26 +00004696 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004697 txq_array_size =
4698 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4699 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4700
4701 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4702 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004703 if (!bp->bnx2x_txq)
4704 goto alloc_err;
4705
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004706 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004707 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004708 if (!tbl)
4709 goto alloc_err;
4710 bp->msix_table = tbl;
4711
4712 /* ilt */
4713 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4714 if (!ilt)
4715 goto alloc_err;
4716 bp->ilt = ilt;
4717
4718 return 0;
4719alloc_err:
4720 bnx2x_free_mem_bp(bp);
4721 return -ENOMEM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004722}
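/* Hedged illustration: the allocate-or-unwind pattern above, where any
 * failure jumps to a single error label that frees everything
 * allocated so far (the free routine tolerates NULL pointers).
 * Standalone sketch with hypothetical names:
 */
#include <stdlib.h>

struct two_bufs { void *a, *b; };

static int alloc_two(struct two_bufs *t, size_t na, size_t nb)
{
	t->a = calloc(1, na);
	if (!t->a)
		goto err;
	t->b = calloc(1, nb);
	if (!t->b)
		goto err;
	return 0;
err:
	free(t->a);	/* free() on NULL is a no-op */
	free(t->b);
	t->a = t->b = NULL;
	return -1;
}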
4723
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004724int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004725{
4726 struct bnx2x *bp = netdev_priv(dev);
4727
4728 if (unlikely(!netif_running(dev)))
4729 return 0;
4730
Yuval Mintz5d07d862012-09-13 02:56:21 +00004731 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004732 return bnx2x_nic_load(bp, LOAD_NORMAL);
4733}
4734
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004735int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4736{
4737 u32 sel_phy_idx = 0;
4738 if (bp->link_params.num_phys <= 1)
4739 return INT_PHY;
4740
4741 if (bp->link_vars.link_up) {
4742 sel_phy_idx = EXT_PHY1;
4743 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4744 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4745 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4746 sel_phy_idx = EXT_PHY2;
4747 } else {
4748
4749 switch (bnx2x_phy_selection(&bp->link_params)) {
4750 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4751 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4752 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4753 sel_phy_idx = EXT_PHY1;
4754 break;
4755 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4756 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4757 sel_phy_idx = EXT_PHY2;
4758 break;
4759 }
4760 }
4761
4762 return sel_phy_idx;
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004763}
4764int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4765{
4766 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4767 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004768 * The selected active PHY index is always the post-swap one (in case
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004769 * PHY swapping is enabled). So when swapping is enabled, we need to
4770 * reverse the configuration.
4771 */
4772
4773 if (bp->link_params.multi_phy_config &
4774 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4775 if (sel_phy_idx == EXT_PHY1)
4776 sel_phy_idx = EXT_PHY2;
4777 else if (sel_phy_idx == EXT_PHY2)
4778 sel_phy_idx = EXT_PHY1;
4779 }
4780 return LINK_CONFIG_IDX(sel_phy_idx);
4781}
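/* Hedged illustration: undoing the PHY swap as above - when the
 * swapped bit is set, external PHYs 1 and 2 simply trade places while
 * the internal PHY is unaffected. Standalone sketch with hypothetical
 * index values:
 */
enum { INT_PHY_IDX = 0, EXT_PHY1_IDX = 1, EXT_PHY2_IDX = 2 };

static int apply_phy_swap(int sel, int swapped)
{
	if (!swapped)
		return sel;
	if (sel == EXT_PHY1_IDX)
		return EXT_PHY2_IDX;
	if (sel == EXT_PHY2_IDX)
		return EXT_PHY1_IDX;
	return sel;
}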
4782
Merav Sicron55c11942012-11-07 00:45:48 +00004783#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004784int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4785{
4786 struct bnx2x *bp = netdev_priv(dev);
4787 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4788
4789 switch (type) {
4790 case NETDEV_FCOE_WWNN:
4791 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4792 cp->fcoe_wwn_node_name_lo);
4793 break;
4794 case NETDEV_FCOE_WWPN:
4795 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4796 cp->fcoe_wwn_port_name_lo);
4797 break;
4798 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004799 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004800 return -EINVAL;
4801 }
4802
4803 return 0;
4804}
4805#endif
4806
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004807/* called with rtnl_lock */
4808int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4809{
4810 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004811
Yuval Mintz0650c0b2015-05-04 12:34:12 +03004812 if (pci_num_vf(bp->pdev)) {
4813 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4814 return -EPERM;
4815 }
4816
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004817 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004818 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004819 return -EAGAIN;
4820 }
4821
4822 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004823 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4824 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004825 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004826 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004827
4828 /* This does not race with packet allocation
4829 * because the actual alloc size is
4830 * only updated as part of load
4831 */
4832 dev->mtu = new_mtu;
4833
Michał Mirosław66371c42011-04-12 09:38:23 +00004834 return bnx2x_reload_if_running(dev);
4835}
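/* Hedged illustration: the MTU window accepted above. The upper bound
 * is the jumbo-frame payload limit; the lower bound keeps the whole
 * frame (payload plus Ethernet header) at the device minimum. The
 * constants below are assumptions for illustration only.
 */
#include <stdbool.h>

#define JUMBO_PKT_MAX	9600	/* assumed ETH_MAX_JUMBO_PACKET_SIZE */
#define PKT_MIN		60	/* assumed ETH_MIN_PACKET_SIZE */
#define ETH_HDR_LEN	14

static bool mtu_in_range(int new_mtu)
{
	return new_mtu <= JUMBO_PKT_MAX &&
	       (new_mtu + ETH_HDR_LEN) >= PKT_MIN;
}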
4836
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004837netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004838 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004839{
4840 struct bnx2x *bp = netdev_priv(dev);
4841
Yuval Mintz909d9fa2015-04-22 12:47:32 +03004842 if (pci_num_vf(bp->pdev)) {
4843 netdev_features_t changed = dev->features ^ features;
4844
4845 /* Revert the requested changes in features if they
4846 * would require internal reload of PF in bnx2x_set_features().
4847 */
4848 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4849 features &= ~NETIF_F_RXCSUM;
4850 features |= dev->features & NETIF_F_RXCSUM;
4851 }
4852
4853 if (changed & NETIF_F_LOOPBACK) {
4854 features &= ~NETIF_F_LOOPBACK;
4855 features |= dev->features & NETIF_F_LOOPBACK;
4856 }
4857 }
4858
Michał Mirosław66371c42011-04-12 09:38:23 +00004859 /* TPA requires Rx CSUM offloading */
Dmitry Kravkovaebf6242014-08-25 17:48:32 +03004860 if (!(features & NETIF_F_RXCSUM)) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004861 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004862 features &= ~NETIF_F_GRO;
4863 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004864
4865 return features;
4866}
4867
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004868int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004869{
4870 struct bnx2x *bp = netdev_priv(dev);
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004871 netdev_features_t changes = features ^ dev->features;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004872 bool bnx2x_reload = false;
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004873 int rc;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004874
Yuval Mintz909d9fa2015-04-22 12:47:32 +03004875 /* VFs or non-SRIOV PFs should be able to change loopback feature */
4876 if (!pci_num_vf(bp->pdev)) {
4877 if (features & NETIF_F_LOOPBACK) {
4878 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4879 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4880 bnx2x_reload = true;
4881 }
4882 } else {
4883 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4884 bp->link_params.loopback_mode = LOOPBACK_NONE;
4885 bnx2x_reload = true;
4886 }
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004887 }
4888 }
4889
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004890 /* if GRO is changed while LRO is enabled, don't force a reload */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004891 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4892 changes &= ~NETIF_F_GRO;
Eric Dumazet8802f572013-05-18 07:14:53 +00004893
Dmitry Kravkovaebf6242014-08-25 17:48:32 +03004894 /* if GRO is changed while HW TPA is off, don't force a reload */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004895 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4896 changes &= ~NETIF_F_GRO;
Dmitry Kravkovaebf6242014-08-25 17:48:32 +03004897
Eric Dumazet8802f572013-05-18 07:14:53 +00004898 if (changes)
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004899 bnx2x_reload = true;
Eric Dumazet8802f572013-05-18 07:14:53 +00004900
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004901 if (bnx2x_reload) {
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02004902 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4903 dev->features = features;
4904 rc = bnx2x_reload_if_running(dev);
4905 return rc ? rc : 1;
4906 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004907 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004908 }
4909
Michał Mirosław66371c42011-04-12 09:38:23 +00004910 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004911}
4912
4913void bnx2x_tx_timeout(struct net_device *dev)
4914{
4915 struct bnx2x *bp = netdev_priv(dev);
4916
4917#ifdef BNX2X_STOP_ON_ERROR
4918 if (!bp->panic)
4919 bnx2x_panic();
4920#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004921
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004922 /* This allows the netif to be shutdown gracefully before resetting */
Yuval Mintz230bb0f2014-02-12 18:19:56 +02004923 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004924}
4925
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004926int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4927{
4928 struct net_device *dev = pci_get_drvdata(pdev);
4929 struct bnx2x *bp;
4930
4931 if (!dev) {
4932 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4933 return -ENODEV;
4934 }
4935 bp = netdev_priv(dev);
4936
4937 rtnl_lock();
4938
4939 pci_save_state(pdev);
4940
4941 if (!netif_running(dev)) {
4942 rtnl_unlock();
4943 return 0;
4944 }
4945
4946 netif_device_detach(dev);
4947
Yuval Mintz5d07d862012-09-13 02:56:21 +00004948 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004949
4950 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4951
4952 rtnl_unlock();
4953
4954 return 0;
4955}
4956
4957int bnx2x_resume(struct pci_dev *pdev)
4958{
4959 struct net_device *dev = pci_get_drvdata(pdev);
4960 struct bnx2x *bp;
4961 int rc;
4962
4963 if (!dev) {
4964 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4965 return -ENODEV;
4966 }
4967 bp = netdev_priv(dev);
4968
4969 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004970 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004971 return -EAGAIN;
4972 }
4973
4974 rtnl_lock();
4975
4976 pci_restore_state(pdev);
4977
4978 if (!netif_running(dev)) {
4979 rtnl_unlock();
4980 return 0;
4981 }
4982
4983 bnx2x_set_power_state(bp, PCI_D0);
4984 netif_device_attach(dev);
4985
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004986 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4987
4988 rtnl_unlock();
4989
4990 return rc;
4991}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004992
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004993void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4994 u32 cid)
4995{
Ariel Eliorb9871bc2013-09-04 14:09:21 +03004996 if (!cxt) {
4997 BNX2X_ERR("bad context pointer %p\n", cxt);
4998 return;
4999 }
5000
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005001 /* ustorm cxt validation */
5002 cxt->ustorm_ag_context.cdu_usage =
5003 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5004 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5005 /* xcontext validation */
5006 cxt->xstorm_ag_context.cdu_reserved =
5007 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5008 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5009}
5010
Eric Dumazet1191cb82012-04-27 21:39:21 +00005011static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5012 u8 fw_sb_id, u8 sb_index,
5013 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005014{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005015 u32 addr = BAR_CSTRORM_INTMEM +
5016 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5017 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00005018 DP(NETIF_MSG_IFUP,
5019 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5020 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005021}
5022
Eric Dumazet1191cb82012-04-27 21:39:21 +00005023static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5024 u16 fw_sb_id, u8 sb_index,
5025 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005026{
5027 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5028 u32 addr = BAR_CSTRORM_INTMEM +
5029 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
Ariel Elior0c14e5c2013-04-17 22:49:06 +00005030 u8 flags = REG_RD8(bp, addr);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005031 /* clear and set */
5032 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5033 flags |= enable_flag;
Ariel Elior0c14e5c2013-04-17 22:49:06 +00005034 REG_WR8(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00005035 DP(NETIF_MSG_IFUP,
5036 "port %x fw_sb_id %d sb_index %d disable %d\n",
5037 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03005038}
5039
5040void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5041 u8 sb_index, u8 disable, u16 usec)
5042{
5043 int port = BP_PORT(bp);
5044 u8 ticks = usec / BNX2X_BTR;
5045
5046 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5047
5048 disable = disable ? 1 : (usec ? 0 : 1);
5049 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5050}
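/* Hedged illustration: the coalescing math above. The microsecond
 * interval is divided down to timer ticks, and a zero interval forces
 * the index off even when the caller asked for it to stay enabled. The
 * tick resolution below is an assumption standing in for BNX2X_BTR.
 */
#include <stdint.h>

#define TICK_RESOLUTION_US	4	/* assumed BNX2X_BTR value */

static void hc_params(uint16_t usec, int disable_in,
		      uint8_t *ticks, int *disable_out)
{
	*ticks = usec / TICK_RESOLUTION_US;
	*disable_out = disable_in ? 1 : (usec ? 0 : 1);
}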
Yuval Mintz230bb0f2014-02-12 18:19:56 +02005051
5052void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5053 u32 verbose)
5054{
Peter Zijlstra4e857c52014-03-17 18:06:10 +01005055 smp_mb__before_atomic();
Yuval Mintz230bb0f2014-02-12 18:19:56 +02005056 set_bit(flag, &bp->sp_rtnl_state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01005057 smp_mb__after_atomic();
Yuval Mintz230bb0f2014-02-12 18:19:56 +02005058 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5059 flag);
5060 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5061}
5062EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);