Yuval Mintz4ad79e12015-07-22 09:16:23 +03001/* bnx2x_cmn.c: QLogic Everest network driver.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002 *
Yuval Mintz247fa822013-01-14 05:11:50 +00003 * Copyright (c) 2007-2013 Broadcom Corporation
Yuval Mintz4ad79e12015-07-22 09:16:23 +03004 * Copyright (c) 2014 QLogic Corporation
5 * All rights reserved
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
Ariel Elior08f6dd82014-05-27 13:11:36 +030011 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000012 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
17 *
18 */
19
Joe Perchesf1deab52011-08-14 12:16:21 +000020#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000022#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000023#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000024#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000025#include <linux/ip.h>
Amir Vadaic9931892014-08-25 16:06:54 +030026#include <linux/crash_dump.h>
Yuval Mintz99690852013-01-14 05:11:49 +000027#include <net/tcp.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000028#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070029#include <net/ip6_checksum.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030030#include <net/busy_poll.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000031#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000032#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000033#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000034#include "bnx2x_sp.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000035
stephen hemmingera8f47eb2014-01-09 22:20:11 -080036static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42{
43 int i;
44
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
49 napi_hash_add(&bnx2x_fp(bp, i, napi));
50 }
51}
52
53static void bnx2x_add_all_napi(struct bnx2x *bp)
54{
55 int i;
56
57 /* Add NAPI objects */
58 for_each_eth_queue(bp, i) {
59 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
60 bnx2x_poll, NAPI_POLL_WEIGHT);
61 napi_hash_add(&bnx2x_fp(bp, i, napi));
62 }
63}
64
65static int bnx2x_calc_num_queues(struct bnx2x *bp)
66{
Michal Schmidt7d0445d2014-02-25 16:04:24 +010067 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
Michal Schmidtff2ad302014-02-25 16:04:25 +010068
69 /* Reduce memory usage in kdump environment by using only one queue */
Amir Vadaic9931892014-08-25 16:06:54 +030070 if (is_kdump_kernel())
Michal Schmidtff2ad302014-02-25 16:04:25 +010071 nq = 1;
72
Michal Schmidt7d0445d2014-02-25 16:04:24 +010073 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
74 return nq;
stephen hemmingera8f47eb2014-01-09 22:20:11 -080075}
76
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000077/**
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000078 * bnx2x_move_fp - move content of the fastpath structure.
79 *
80 * @bp: driver handle
81 * @from: source FP index
82 * @to: destination FP index
83 *
 84 * Makes sure the contents of bp->fp[to].napi are kept
Ariel Elior72754082011-11-13 04:34:31 +000085 * intact. This is done by first copying the napi struct from
86 * the target to the source, and then mem copying the entire
Merav Sicron65565882012-06-19 07:48:26 +000087 * source onto the target. Update txdata pointers and related
88 * content.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000089 */
90static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
91{
92 struct bnx2x_fastpath *from_fp = &bp->fp[from];
93 struct bnx2x_fastpath *to_fp = &bp->fp[to];
Barak Witkowski15192a82012-06-19 07:48:28 +000094 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
95 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
96 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
97 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
Merav Sicron65565882012-06-19 07:48:26 +000098 int old_max_eth_txqs, new_max_eth_txqs;
99 int old_txdata_index = 0, new_txdata_index = 0;
Yuval Mintz34d56262013-08-28 01:13:01 +0300100 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
Ariel Elior72754082011-11-13 04:34:31 +0000101
102 /* Copy the NAPI object as it has been already initialized */
103 from_fp->napi = to_fp->napi;
104
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000105 /* Move bnx2x_fastpath contents */
106 memcpy(to_fp, from_fp, sizeof(*to_fp));
107 to_fp->index = to;
Merav Sicron65565882012-06-19 07:48:26 +0000108
Yuval Mintz34d56262013-08-28 01:13:01 +0300109 /* Retain the tpa_info of the original `to' version as we don't want
110 * 2 FPs to contain the same tpa_info pointer.
111 */
112 to_fp->tpa_info = old_tpa_info;
113
Barak Witkowski15192a82012-06-19 07:48:28 +0000114 /* move sp_objs contents as well, as their indices match fp ones */
115 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
116
117 /* move fp_stats contents as well, as their indices match fp ones */
118 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
119
Merav Sicron65565882012-06-19 07:48:26 +0000120 /* Update txdata pointers in fp and move txdata content accordingly:
121 * Each fp consumes 'max_cos' txdata structures, so the index should be
122 * decremented by max_cos x delta.
123 */
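	/* Illustrative example (assumed values, not from the original code):
	 * with BNX2X_NUM_ETH_QUEUES(bp) == 6, max_cos == 3, from == 8 and
	 * to == 6 (delta == 2), old_max_eth_txqs == 6 * 3 == 18 and
	 * new_max_eth_txqs == (6 - 8 + 6) * 3 == 12, i.e. the index drops
	 * by max_cos * delta == 6.
	 */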
124
125 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
126 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
127 (bp)->max_cos;
128 if (from == FCOE_IDX(bp)) {
129 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
131 }
132
Yuval Mintz4864a162013-01-10 04:53:39 +0000133 memcpy(&bp->bnx2x_txq[new_txdata_index],
134 &bp->bnx2x_txq[old_txdata_index],
Merav Sicron65565882012-06-19 07:48:26 +0000135 sizeof(struct bnx2x_fp_txdata));
136 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000137}
138
Ariel Elior8ca5e172013-01-01 05:22:34 +0000139/**
140 * bnx2x_fill_fw_str - Fill buffer with FW version string.
141 *
142 * @bp: driver handle
143 * @buf: character buffer to fill with the fw name
144 * @buf_len: length of the above buffer
145 *
146 */
147void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
148{
149 if (IS_PF(bp)) {
150 u8 phy_fw_ver[PHY_FW_VER_LEN];
151
152 phy_fw_ver[0] = '\0';
153 bnx2x_get_ext_phy_fw_version(&bp->link_params,
154 phy_fw_ver, PHY_FW_VER_LEN);
155 strlcpy(buf, bp->fw_ver, buf_len);
156 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
157 "bc %d.%d.%d%s%s",
158 (bp->common.bc_ver & 0xff0000) >> 16,
159 (bp->common.bc_ver & 0xff00) >> 8,
160 (bp->common.bc_ver & 0xff),
161 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
162 } else {
Ariel Elior64112802013-01-07 00:50:23 +0000163 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
Ariel Elior8ca5e172013-01-01 05:22:34 +0000164 }
165}
166
David S. Miller4b87f922013-01-15 15:05:59 -0500167/**
Yuval Mintz4864a162013-01-10 04:53:39 +0000168 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
169 *
170 * @bp: driver handle
171 * @delta: number of eth queues which were not allocated
172 */
173static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
174{
175 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
176
 177 /* Queue pointer cannot be re-set on an fp-basis, as moving a pointer
Yuval Mintz16a5fd92013-06-02 00:06:18 +0000178 * backward along the array could cause memory to be overwritten
Yuval Mintz4864a162013-01-10 04:53:39 +0000179 */
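	/* Illustrative example (assumed values): with old_eth_num == 8 and
	 * delta == 2, the cos == 1 txdata of fp[0] is copied to
	 * new_idx == 1 * (8 - 2) + 0 == 6, assuming it originally lived at
	 * cos * old_eth_num + i == 8.
	 */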
180 for (cos = 1; cos < bp->max_cos; cos++) {
181 for (i = 0; i < old_eth_num - delta; i++) {
182 struct bnx2x_fastpath *fp = &bp->fp[i];
183 int new_idx = cos * (old_eth_num - delta) + i;
184
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
186 sizeof(struct bnx2x_fp_txdata));
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
188 }
189 }
190}
191
stephen hemmingera8f47eb2014-01-09 22:20:11 -0800192int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300193
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000194/* free skb in the packet ring at pos idx
195 * return idx of last bd freed
196 */
Ariel Elior6383c0b2011-07-14 08:31:57 +0000197static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +0000198 u16 idx, unsigned int *pkts_compl,
199 unsigned int *bytes_compl)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000200{
Ariel Elior6383c0b2011-07-14 08:31:57 +0000201 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000202 struct eth_tx_start_bd *tx_start_bd;
203 struct eth_tx_bd *tx_data_bd;
204 struct sk_buff *skb = tx_buf->skb;
205 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
206 int nbd;
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100207 u16 split_bd_len = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000208
 209 /* prefetch skb end pointer to speed up dev_kfree_skb() */
210 prefetch(&skb->end);
211
Merav Sicron51c1a582012-03-18 10:33:38 +0000212 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000213 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000214
Ariel Elior6383c0b2011-07-14 08:31:57 +0000215 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000216
217 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
218#ifdef BNX2X_STOP_ON_ERROR
219 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
220 BNX2X_ERR("BAD nbd!\n");
221 bnx2x_panic();
222 }
223#endif
224 new_cons = nbd + tx_buf->first_bd;
225
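	/* BD chain released below: the start BD, one or two parse BDs, an
	 * optional TSO split data BD sharing the start BD's DMA mapping,
	 * and then one BD per page fragment.
	 */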
226 /* Get the next bd */
227 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
228
229 /* Skip a parse bd... */
230 --nbd;
231 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
232
Dmitry Kravkovfe26566d2014-07-24 18:54:47 +0300233 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
234 /* Skip second parse bd... */
235 --nbd;
236 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
237 }
238
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100239 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000240 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100241 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
242 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000243 --nbd;
244 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
245 }
246
Michal Schmidt95e92fd2014-01-09 14:36:27 +0100247 /* unmap first bd */
248 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
249 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
250 DMA_TO_DEVICE);
251
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000252 /* now free frags */
253 while (nbd > 0) {
254
Ariel Elior6383c0b2011-07-14 08:31:57 +0000255 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000256 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
257 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
258 if (--nbd)
259 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
260 }
261
262 /* release skb */
263 WARN_ON(!skb);
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000264 if (likely(skb)) {
Tom Herbert2df1a702011-11-28 16:33:37 +0000265 (*pkts_compl)++;
266 (*bytes_compl) += skb->len;
Yuval Mintze1615902015-08-10 12:49:35 +0300267 dev_kfree_skb_any(skb);
Tom Herbert2df1a702011-11-28 16:33:37 +0000268 }
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000269
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000270 tx_buf->first_bd = 0;
271 tx_buf->skb = NULL;
272
273 return new_cons;
274}
275
Ariel Elior6383c0b2011-07-14 08:31:57 +0000276int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000277{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000278 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000279 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Tom Herbert2df1a702011-11-28 16:33:37 +0000280 unsigned int pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000281
282#ifdef BNX2X_STOP_ON_ERROR
283 if (unlikely(bp->panic))
284 return -1;
285#endif
286
Ariel Elior6383c0b2011-07-14 08:31:57 +0000287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
288 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
289 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000290
291 while (sw_cons != hw_cons) {
292 u16 pkt_cons;
293
294 pkt_cons = TX_BD(sw_cons);
295
Merav Sicron51c1a582012-03-18 10:33:38 +0000296 DP(NETIF_MSG_TX_DONE,
297 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000298 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000299
Tom Herbert2df1a702011-11-28 16:33:37 +0000300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
Yuval Mintz2de67432013-01-23 03:21:43 +0000301 &pkts_compl, &bytes_compl);
Tom Herbert2df1a702011-11-28 16:33:37 +0000302
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000303 sw_cons++;
304 }
305
Tom Herbert2df1a702011-11-28 16:33:37 +0000306 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
307
Ariel Elior6383c0b2011-07-14 08:31:57 +0000308 txdata->tx_pkt_cons = sw_cons;
309 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000310
311 /* Need to make the tx_bd_cons update visible to start_xmit()
312 * before checking for netif_tx_queue_stopped(). Without the
313 * memory barrier, there is a small possibility that
314 * start_xmit() will miss it and cause the queue to be stopped
315 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300316 * On the other hand we need an rmb() here to ensure the proper
317 * ordering of bit testing in the following
318 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000319 */
320 smp_mb();
321
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000322 if (unlikely(netif_tx_queue_stopped(txq))) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +0000323 /* Taking tx_lock() is needed to prevent re-enabling the queue
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000324 * while it's empty. This could have happened if rx_action() gets
325 * suspended in bnx2x_tx_int() after the condition before
326 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
327 *
328 * stops the queue->sees fresh tx_bd_cons->releases the queue->
329 * sends some packets consuming the whole queue again->
330 * stops the queue
331 */
332
333 __netif_tx_lock(txq, smp_processor_id());
334
335 if ((netif_tx_queue_stopped(txq)) &&
336 (bp->state == BNX2X_STATE_OPEN) &&
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +0000337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000338 netif_tx_wake_queue(txq);
339
340 __netif_tx_unlock(txq);
341 }
342 return 0;
343}
344
345static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
346 u16 idx)
347{
348 u16 last_max = fp->last_max_sge;
349
350 if (SUB_S16(idx, last_max) > 0)
351 fp->last_max_sge = idx;
352}
353
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000354static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
355 u16 sge_len,
356 struct eth_end_agg_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000357{
358 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000359 u16 last_max, last_elem, first_elem;
360 u16 delta = 0;
361 u16 i;
362
363 if (!sge_len)
364 return;
365
366 /* First mark all used pages */
367 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300368 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000370
371 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000373
374 /* Here we assume that the last SGE index is the biggest */
375 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000376 bnx2x_update_last_max_sge(fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000378
379 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300380 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000382
383 /* If ring is not full */
384 if (last_elem + 1 != first_elem)
385 last_elem++;
386
387 /* Now update the prod */
388 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
389 if (likely(fp->sge_mask[i]))
390 break;
391
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
393 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000394 }
395
396 if (delta > 0) {
397 fp->rx_sge_prod += delta;
398 /* clear page-end entries */
399 bnx2x_clear_sge_mask_next_elems(fp);
400 }
401
402 DP(NETIF_MSG_RX_STATUS,
403 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
404 fp->last_max_sge, fp->rx_sge_prod);
405}
406
Yuval Mintz2de67432013-01-23 03:21:43 +0000407/* Get the Toeplitz hash value for the skb using the value from the
Eric Dumazete52fcb22011-11-14 06:05:34 +0000408 * CQE (calculated by HW).
409 */
410static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000411 const struct eth_fast_path_rx_cqe *cqe,
Tom Herbert5495ab72013-12-19 08:59:08 -0800412 enum pkt_hash_types *rxhash_type)
Eric Dumazete52fcb22011-11-14 06:05:34 +0000413{
Yuval Mintz2de67432013-01-23 03:21:43 +0000414 /* Get Toeplitz hash from CQE */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000415 if ((bp->dev->features & NETIF_F_RXHASH) &&
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417 enum eth_rss_hash_type htype;
418
419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
Tom Herbert5495ab72013-12-19 08:59:08 -0800420 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421 (htype == TCP_IPV6_HASH_TYPE)) ?
422 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
423
Eric Dumazete52fcb22011-11-14 06:05:34 +0000424 return le32_to_cpu(cqe->rss_hash_result);
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000425 }
Tom Herbert5495ab72013-12-19 08:59:08 -0800426 *rxhash_type = PKT_HASH_TYPE_NONE;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000427 return 0;
428}
429
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000430static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000431 u16 cons, u16 prod,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300432 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000433{
434 struct bnx2x *bp = fp->bp;
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
438 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
440 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000441
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300442 /* print error if current state != stop */
443 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000444 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
445
Eric Dumazete52fcb22011-11-14 06:05:34 +0000446 /* Try to map an empty data buffer from the aggregation info */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300447 mapping = dma_map_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000448 first_buf->data + NET_SKB_PAD,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300449 fp->rx_buf_size, DMA_FROM_DEVICE);
450 /*
451 * ...if it fails - move the skb from the consumer to the producer
452 * and set the current aggregation state as ERROR to drop it
453 * when TPA_STOP arrives.
454 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000455
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
457 /* Move the BD from the consumer to the producer */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000458 bnx2x_reuse_rx_data(fp, cons, prod);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300459 tpa_info->tpa_state = BNX2X_TPA_ERROR;
460 return;
461 }
462
Eric Dumazete52fcb22011-11-14 06:05:34 +0000463 /* move empty data from pool to prod */
464 prod_rx_buf->data = first_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300465 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000466 /* point prod_bd to new data */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000467 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
468 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
469
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300470 /* move partial skb from cons to pool (don't unmap yet) */
471 *first_buf = *cons_rx_buf;
472
473 /* mark bin state as START */
474 tpa_info->parsing_flags =
475 le16_to_cpu(cqe->pars_flags.flags);
476 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
477 tpa_info->tpa_state = BNX2X_TPA_START;
478 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
479 tpa_info->placement_offset = cqe->placement_offset;
Tom Herbert5495ab72013-12-19 08:59:08 -0800480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000481 if (fp->mode == TPA_MODE_GRO) {
482 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
Yuval Mintz924d75a2013-01-23 03:21:44 +0000483 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000484 tpa_info->gro_size = gro_size;
485 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300486
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000487#ifdef BNX2X_STOP_ON_ERROR
488 fp->tpa_queue_used |= (1 << queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000489 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000490 fp->tpa_queue_used);
491#endif
492}
493
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000494/* Timestamp option length allowed for TPA aggregation:
495 *
496 * nop nop kind length echo val
497 */
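/* Illustrative breakdown (assuming the standard TCP timestamp option layout):
 * 1 (NOP) + 1 (NOP) + 1 (kind) + 1 (length) + 4 (TSval) + 4 (TSecr) = 12 bytes
 */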
498#define TPA_TSTAMP_OPT_LEN 12
499/**
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000500 * bnx2x_set_gro_params - compute GRO values
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000501 *
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000502 * @skb: packet skb
Dmitry Kravkove8920672011-05-04 23:52:40 +0000503 * @parsing_flags: parsing flags from the START CQE
504 * @len_on_bd: total length of the first packet for the
505 * aggregation.
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000506 * @pkt_len: length of all segments
Dmitry Kravkove8920672011-05-04 23:52:40 +0000507 *
508 * Approximate value of the MSS for this aggregation calculated using
509 * the first packet of it.
Yuval Mintz2de67432013-01-23 03:21:43 +0000510 * Compute number of aggregated segments, and gso_type.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000511 */
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000512static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
Yuval Mintzab5777d2013-03-11 05:17:47 +0000513 u16 len_on_bd, unsigned int pkt_len,
514 u16 num_of_coalesced_segs)
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000515{
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000516 /* TPA aggregation won't have either IP options or TCP options
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300517 * other than timestamp or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000518 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300519 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
520
521 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000522 PRS_FLAG_OVERETH_IPV6) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300523 hdrs_len += sizeof(struct ipv6hdr);
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
525 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300526 hdrs_len += sizeof(struct iphdr);
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
528 }
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000529
 530 /* Check if there was a TCP timestamp; if there was, it will
 531 * always be 12 bytes long: nop nop kind length echo val.
532 *
533 * Otherwise FW would close the aggregation.
534 */
535 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
536 hdrs_len += TPA_TSTAMP_OPT_LEN;
537
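	/* Illustrative example (assumed values): for an IPv4 aggregation with
	 * TCP timestamps and len_on_bd == 1514, hdrs_len == 14 + 20 + 20 + 12
	 * == 66, giving a gso_size (approximate MSS) of 1448.
	 */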
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000538 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
539
540 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
541 * to skb_shinfo(skb)->gso_segs
542 */
Yuval Mintzab5777d2013-03-11 05:17:47 +0000543 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000544}
545
Michal Schmidt996dedb2013-09-05 22:13:09 +0200546static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
547 u16 index, gfp_t gfp_mask)
Eric Dumazet1191cb82012-04-27 21:39:21 +0000548{
Eric Dumazet1191cb82012-04-27 21:39:21 +0000549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300551 struct bnx2x_alloc_pool *pool = &fp->page_pool;
Eric Dumazet1191cb82012-04-27 21:39:21 +0000552 dma_addr_t mapping;
553
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300554 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
555
556 /* put page reference used by the memory pool, since we
557 * won't be using this page as the mempool anymore.
558 */
559 if (pool->page)
560 put_page(pool->page);
561
562 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
563 if (unlikely(!pool->page)) {
564 BNX2X_ERR("Can't alloc sge\n");
565 return -ENOMEM;
566 }
567
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300568 pool->offset = 0;
Eric Dumazet1191cb82012-04-27 21:39:21 +0000569 }
570
Michal Schmidt80316122015-06-26 17:50:00 +0200571 mapping = dma_map_page(&bp->pdev->dev, pool->page,
572 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
573 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
574 BNX2X_ERR("Can't map sge\n");
575 return -ENOMEM;
576 }
577
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300578 get_page(pool->page);
579 sw_buf->page = pool->page;
580 sw_buf->offset = pool->offset;
Eric Dumazet1191cb82012-04-27 21:39:21 +0000581
Eric Dumazet1191cb82012-04-27 21:39:21 +0000582 dma_unmap_addr_set(sw_buf, mapping, mapping);
583
584 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
585 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
586
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300587 pool->offset += SGE_PAGE_SIZE;
588
Eric Dumazet1191cb82012-04-27 21:39:21 +0000589 return 0;
590}
591
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000592static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000593 struct bnx2x_agg_info *tpa_info,
594 u16 pages,
595 struct sk_buff *skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300596 struct eth_end_agg_rx_cqe *cqe,
597 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000598{
599 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000600 u32 i, frag_len, frag_size;
601 int err, j, frag_id = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300602 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000603 u16 full_page = 0, gro_size = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000604
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300605 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000606
607 if (fp->mode == TPA_MODE_GRO) {
608 gro_size = tpa_info->gro_size;
609 full_page = tpa_info->full_page;
610 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000611
612 /* This is needed in order to enable forwarding support */
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000613 if (frag_size)
614 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
Yuval Mintzab5777d2013-03-11 05:17:47 +0000615 le16_to_cpu(cqe->pkt_len),
616 le16_to_cpu(cqe->num_of_coalesced_segs));
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000617
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000618#ifdef BNX2X_STOP_ON_ERROR
Yuval Mintz924d75a2013-01-23 03:21:44 +0000619 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000620 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
621 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300622 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000623 bnx2x_panic();
624 return -EINVAL;
625 }
626#endif
627
628 /* Run through the SGL and compose the fragmented skb */
629 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300630 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000631
632 /* FW gives the indices of the SGE as if the ring is an array
633 (meaning that "next" element will consume 2 indices) */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000634 if (fp->mode == TPA_MODE_GRO)
635 frag_len = min_t(u32, frag_size, (u32)full_page);
636 else /* LRO */
Yuval Mintz924d75a2013-01-23 03:21:44 +0000637 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000638
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000639 rx_pg = &fp->rx_page_ring[sge_idx];
640 old_rx_pg = *rx_pg;
641
642 /* If we fail to allocate a substitute page, we simply stop
643 where we are and drop the whole packet */
Michal Schmidt996dedb2013-09-05 22:13:09 +0200644 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000645 if (unlikely(err)) {
Barak Witkowski15192a82012-06-19 07:48:28 +0000646 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000647 return err;
648 }
649
Michal Schmidt80316122015-06-26 17:50:00 +0200650 dma_unmap_page(&bp->pdev->dev,
651 dma_unmap_addr(&old_rx_pg, mapping),
652 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000653 /* Add one frag and update the appropriate fields in the skb */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000654 if (fp->mode == TPA_MODE_LRO)
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300655 skb_fill_page_desc(skb, j, old_rx_pg.page,
656 old_rx_pg.offset, frag_len);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000657 else { /* GRO */
658 int rem;
659 int offset = 0;
660 for (rem = frag_len; rem > 0; rem -= gro_size) {
661 int len = rem > gro_size ? gro_size : rem;
662 skb_fill_page_desc(skb, frag_id++,
Gabriel Krisman Bertazi4cace672015-05-27 13:51:43 -0300663 old_rx_pg.page,
664 old_rx_pg.offset + offset,
665 len);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000666 if (offset)
667 get_page(old_rx_pg.page);
668 offset += len;
669 }
670 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000671
672 skb->data_len += frag_len;
Yuval Mintz924d75a2013-01-23 03:21:44 +0000673 skb->truesize += SGE_PAGES;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000674 skb->len += frag_len;
675
676 frag_size -= frag_len;
677 }
678
679 return 0;
680}
681
Eric Dumazetd46d1322012-12-10 12:16:06 +0000682static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
683{
684 if (fp->rx_frag_size)
Alexander Duycke51423d2015-05-06 21:12:31 -0700685 skb_free_frag(data);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000686 else
687 kfree(data);
688}
689
Michal Schmidt996dedb2013-09-05 22:13:09 +0200690static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
Eric Dumazetd46d1322012-12-10 12:16:06 +0000691{
Michal Schmidt996dedb2013-09-05 22:13:09 +0200692 if (fp->rx_frag_size) {
693 /* GFP_KERNEL allocations are used only during initialization */
Mel Gormand0164ad2015-11-06 16:28:21 -0800694 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
Michal Schmidt996dedb2013-09-05 22:13:09 +0200695 return (void *)__get_free_page(gfp_mask);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000696
Michal Schmidt996dedb2013-09-05 22:13:09 +0200697 return netdev_alloc_frag(fp->rx_frag_size);
698 }
699
700 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000701}
702
Yuval Mintz99690852013-01-14 05:11:49 +0000703#ifdef CONFIG_INET
704static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
705{
706 const struct iphdr *iph = ip_hdr(skb);
707 struct tcphdr *th;
708
709 skb_set_transport_header(skb, sizeof(struct iphdr));
710 th = tcp_hdr(skb);
711
712 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
713 iph->saddr, iph->daddr, 0);
714}
715
716static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
717{
718 struct ipv6hdr *iph = ipv6_hdr(skb);
719 struct tcphdr *th;
720
721 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
722 th = tcp_hdr(skb);
723
724 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
725 &iph->saddr, &iph->daddr, 0);
726}
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000727
728static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
729 void (*gro_func)(struct bnx2x*, struct sk_buff*))
730{
731 skb_set_network_header(skb, 0);
732 gro_func(bp, skb);
733 tcp_gro_complete(skb);
734}
Yuval Mintz99690852013-01-14 05:11:49 +0000735#endif
736
737static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
738 struct sk_buff *skb)
739{
740#ifdef CONFIG_INET
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000741 if (skb_shinfo(skb)->gso_size) {
Yuval Mintz99690852013-01-14 05:11:49 +0000742 switch (be16_to_cpu(skb->protocol)) {
743 case ETH_P_IP:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000744 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
Yuval Mintz99690852013-01-14 05:11:49 +0000745 break;
746 case ETH_P_IPV6:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
Yuval Mintz99690852013-01-14 05:11:49 +0000748 break;
749 default:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000750 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
Yuval Mintz99690852013-01-14 05:11:49 +0000751 be16_to_cpu(skb->protocol));
752 }
Yuval Mintz99690852013-01-14 05:11:49 +0000753 }
754#endif
Eric Dumazet60e66fe2013-10-12 14:08:34 -0700755 skb_record_rx_queue(skb, fp->rx_queue);
Yuval Mintz99690852013-01-14 05:11:49 +0000756 napi_gro_receive(&fp->napi, skb);
757}
758
Eric Dumazet1191cb82012-04-27 21:39:21 +0000759static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
760 struct bnx2x_agg_info *tpa_info,
761 u16 pages,
762 struct eth_end_agg_rx_cqe *cqe,
763 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000764{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300765 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000766 u8 pad = tpa_info->placement_offset;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300767 u16 len = tpa_info->len_on_bd;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000768 struct sk_buff *skb = NULL;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000769 u8 *new_data, *data = rx_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300770 u8 old_tpa_state = tpa_info->tpa_state;
771
772 tpa_info->tpa_state = BNX2X_TPA_STOP;
773
 774 /* If there was an error during the handling of the TPA_START -
775 * drop this aggregation.
776 */
777 if (old_tpa_state == BNX2X_TPA_ERROR)
778 goto drop;
779
Eric Dumazete52fcb22011-11-14 06:05:34 +0000780 /* Try to allocate the new data */
Michal Schmidt996dedb2013-09-05 22:13:09 +0200781 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000782 /* Unmap skb in the pool anyway, as we are going to change
783 pool entry status to BNX2X_TPA_STOP even if new skb allocation
784 fails. */
785 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800786 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000787 if (likely(new_data))
Eric Dumazetd46d1322012-12-10 12:16:06 +0000788 skb = build_skb(data, fp->rx_frag_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000789
Eric Dumazete52fcb22011-11-14 06:05:34 +0000790 if (likely(skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000791#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800792 if (pad + len > fp->rx_buf_size) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000793 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800794 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000795 bnx2x_panic();
796 return;
797 }
798#endif
799
Eric Dumazete52fcb22011-11-14 06:05:34 +0000800 skb_reserve(skb, pad + NET_SKB_PAD);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000801 skb_put(skb, len);
Tom Herbert5495ab72013-12-19 08:59:08 -0800802 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000803
804 skb->protocol = eth_type_trans(skb, bp->dev);
805 skb->ip_summed = CHECKSUM_UNNECESSARY;
806
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000807 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
808 skb, cqe, cqe_idx)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300809 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
Patrick McHardy86a9bad2013-04-19 02:04:30 +0000810 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
Yuval Mintz99690852013-01-14 05:11:49 +0000811 bnx2x_gro_receive(bp, fp, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000812 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +0000813 DP(NETIF_MSG_RX_STATUS,
814 "Failed to allocate new pages - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000815 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000816 }
817
Eric Dumazete52fcb22011-11-14 06:05:34 +0000818 /* put new data in bin */
819 rx_buf->data = new_data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000820
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300821 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000822 }
Eric Dumazet07b0f002014-06-26 00:44:02 -0700823 if (new_data)
824 bnx2x_frag_free(fp, new_data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300825drop:
826 /* drop the packet and keep the buffer in the bin */
827 DP(NETIF_MSG_RX_STATUS,
828 "Failed to allocate or map a new skb - dropping packet!\n");
Barak Witkowski15192a82012-06-19 07:48:28 +0000829 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000830}
831
Michal Schmidt996dedb2013-09-05 22:13:09 +0200832static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
833 u16 index, gfp_t gfp_mask)
Eric Dumazet1191cb82012-04-27 21:39:21 +0000834{
835 u8 *data;
836 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
837 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
838 dma_addr_t mapping;
839
Michal Schmidt996dedb2013-09-05 22:13:09 +0200840 data = bnx2x_frag_alloc(fp, gfp_mask);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000841 if (unlikely(data == NULL))
842 return -ENOMEM;
843
844 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
845 fp->rx_buf_size,
846 DMA_FROM_DEVICE);
847 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eric Dumazetd46d1322012-12-10 12:16:06 +0000848 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000849 BNX2X_ERR("Can't map rx data\n");
850 return -ENOMEM;
851 }
852
853 rx_buf->data = data;
854 dma_unmap_addr_set(rx_buf, mapping, mapping);
855
856 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
857 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
858
859 return 0;
860}
861
Barak Witkowski15192a82012-06-19 07:48:28 +0000862static
863void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
864 struct bnx2x_fastpath *fp,
865 struct bnx2x_eth_q_stats *qstats)
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000866{
Michal Schmidte4889212012-09-13 12:59:44 +0000867 /* Do nothing if no L4 csum validation was done.
868 * We do not check whether IP csum was validated. For IPv4 we assume
869 * that if the card got as far as validating the L4 csum, it also
870 * validated the IP csum. IPv6 has no IP csum.
871 */
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000872 if (cqe->fast_path_cqe.status_flags &
Michal Schmidte4889212012-09-13 12:59:44 +0000873 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000874 return;
875
Michal Schmidte4889212012-09-13 12:59:44 +0000876 /* If L4 validation was done, check if an error was found. */
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000877
878 if (cqe->fast_path_cqe.type_error_flags &
879 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
880 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
Barak Witkowski15192a82012-06-19 07:48:28 +0000881 qstats->hw_csum_err++;
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000882 else
883 skb->ip_summed = CHECKSUM_UNNECESSARY;
884}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000885
stephen hemmingera8f47eb2014-01-09 22:20:11 -0800886static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000887{
888 struct bnx2x *bp = fp->bp;
889 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300890 u16 sw_comp_cons, sw_comp_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000891 int rx_pkt = 0;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300892 union eth_rx_cqe *cqe;
893 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000894
895#ifdef BNX2X_STOP_ON_ERROR
896 if (unlikely(bp->panic))
897 return 0;
898#endif
Eric W. Biedermanb3529742014-03-14 17:57:59 -0700899 if (budget <= 0)
900 return rx_pkt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000901
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000902 bd_cons = fp->rx_bd_cons;
903 bd_prod = fp->rx_bd_prod;
904 bd_prod_fw = bd_prod;
905 sw_comp_cons = fp->rx_comp_cons;
906 sw_comp_prod = fp->rx_comp_prod;
907
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300908 comp_ring_cons = RCQ_BD(sw_comp_cons);
909 cqe = &fp->rx_comp_ring[comp_ring_cons];
910 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000911
912 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300913 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000914
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300915 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000916 struct sw_rx_bd *rx_buf = NULL;
917 struct sk_buff *skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000918 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300919 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000920 u16 len, pad, queue;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000921 u8 *data;
Tom Herbertbd5cef02013-12-17 23:23:11 -0800922 u32 rxhash;
Tom Herbert5495ab72013-12-19 08:59:08 -0800923 enum pkt_hash_types rxhash_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000924
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300925#ifdef BNX2X_STOP_ON_ERROR
926 if (unlikely(bp->panic))
927 return 0;
928#endif
929
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000930 bd_prod = RX_BD(bd_prod);
931 bd_cons = RX_BD(bd_cons);
932
wenxiong@linux.vnet.ibm.com9aaae042014-06-03 14:14:46 -0500933 /* A rmb() is required to ensure that the CQE is not read
934 * before it is written by the adapter DMA. PCI ordering
935 * rules will make sure the other fields are written before
936 * the marker at the end of struct eth_fast_path_rx_cqe
937 * but without rmb() a weakly ordered processor can process
 938 * stale data. Without the barrier the TPA state machine might
 939 * enter an inconsistent state and the kernel stack might be
 940 * provided with an incorrect packet description - these lead
 941 * to various kernel crashes.
942 */
943 rmb();
944
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300945 cqe_fp_flags = cqe_fp->type_error_flags;
946 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000947
Merav Sicron51c1a582012-03-18 10:33:38 +0000948 DP(NETIF_MSG_RX_STATUS,
949 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
950 CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300951 cqe_fp_flags, cqe_fp->status_flags,
952 le32_to_cpu(cqe_fp->rss_hash_result),
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000953 le16_to_cpu(cqe_fp->vlan_tag),
954 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000955
956 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300957 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000958 bnx2x_sp_event(fp, cqe);
959 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000960 }
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000961
Eric Dumazete52fcb22011-11-14 06:05:34 +0000962 rx_buf = &fp->rx_buf_ring[bd_cons];
963 data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000964
Eric Dumazete52fcb22011-11-14 06:05:34 +0000965 if (!CQE_TYPE_FAST(cqe_fp_type)) {
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000966 struct bnx2x_agg_info *tpa_info;
967 u16 frag_size, pages;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300968#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000969 /* sanity check */
Michal Schmidt7e6b4d42015-04-28 11:34:22 +0200970 if (fp->mode == TPA_MODE_DISABLED &&
Eric Dumazete52fcb22011-11-14 06:05:34 +0000971 (CQE_TYPE_START(cqe_fp_type) ||
972 CQE_TYPE_STOP(cqe_fp_type)))
Michal Schmidt7e6b4d42015-04-28 11:34:22 +0200973 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
Eric Dumazete52fcb22011-11-14 06:05:34 +0000974 CQE_TYPE(cqe_fp_type));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300975#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000976
Eric Dumazete52fcb22011-11-14 06:05:34 +0000977 if (CQE_TYPE_START(cqe_fp_type)) {
978 u16 queue = cqe_fp->queue_index;
979 DP(NETIF_MSG_RX_STATUS,
980 "calling tpa_start on queue %d\n",
981 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000982
Eric Dumazete52fcb22011-11-14 06:05:34 +0000983 bnx2x_tpa_start(fp, queue,
984 bd_cons, bd_prod,
985 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000986
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000987 goto next_rx;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000988 }
989 queue = cqe->end_agg_cqe.queue_index;
990 tpa_info = &fp->tpa_info[queue];
991 DP(NETIF_MSG_RX_STATUS,
992 "calling tpa_stop on queue %d\n",
993 queue);
994
995 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
996 tpa_info->len_on_bd;
997
998 if (fp->mode == TPA_MODE_GRO)
999 pages = (frag_size + tpa_info->full_page - 1) /
1000 tpa_info->full_page;
1001 else
1002 pages = SGE_PAGE_ALIGN(frag_size) >>
1003 SGE_PAGE_SHIFT;
1004
1005 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1006 &cqe->end_agg_cqe, comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001007#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00001008 if (bp->panic)
1009 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001010#endif
1011
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00001012 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1013 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001014 }
1015 /* non TPA */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00001016 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001017 pad = cqe_fp->placement_offset;
1018 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001019 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +00001020 pad + RX_COPY_THRESH,
1021 DMA_FROM_DEVICE);
1022 pad += NET_SKB_PAD;
1023 prefetch(data + pad); /* speedup eth_type_trans() */
1024 /* is this an error packet? */
1025 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001026 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001027 "ERROR flags %x rx packet %u\n",
1028 cqe_fp_flags, sw_comp_cons);
Barak Witkowski15192a82012-06-19 07:48:28 +00001029 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001030 goto reuse_rx;
1031 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001032
Eric Dumazete52fcb22011-11-14 06:05:34 +00001033 /* Since we don't have a jumbo ring
1034 * copy small packets if mtu > 1500
1035 */
1036 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1037 (len <= RX_COPY_THRESH)) {
Alexander Duyck45abfb12014-12-09 19:41:17 -08001038 skb = napi_alloc_skb(&fp->napi, len);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001039 if (skb == NULL) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001040 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001041 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +00001042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001043 goto reuse_rx;
1044 }
Eric Dumazete52fcb22011-11-14 06:05:34 +00001045 memcpy(skb->data, data + pad, len);
1046 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1047 } else {
Michal Schmidt996dedb2013-09-05 22:13:09 +02001048 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1049 GFP_ATOMIC) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001050 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001051 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001052 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001053 DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001054 skb = build_skb(data, fp->rx_frag_size);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001055 if (unlikely(!skb)) {
Eric Dumazetd46d1322012-12-10 12:16:06 +00001056 bnx2x_frag_free(fp, data);
Barak Witkowski15192a82012-06-19 07:48:28 +00001057 bnx2x_fp_qstats(bp, fp)->
1058 rx_skb_alloc_failed++;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001059 goto next_rx;
1060 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001061 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001062 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +00001063 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1064 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +00001065 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001066reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +00001067 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001068 goto next_rx;
1069 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001070 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001071
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001072 skb_put(skb, len);
1073 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001074
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001075 /* Set Toeplitz hash for a non-LRO skb */
Tom Herbert5495ab72013-12-19 08:59:08 -08001076 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1077 skb_set_hash(skb, rxhash, rxhash_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001078
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001079 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001080
Eric Dumazetd6cb3e42012-06-12 23:50:04 +00001081 if (bp->dev->features & NETIF_F_RXCSUM)
Barak Witkowski15192a82012-06-19 07:48:28 +00001082 bnx2x_csum_validate(skb, cqe, fp,
1083 bnx2x_fp_qstats(bp, fp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001084
Dmitry Kravkovf233caf2011-11-13 04:34:22 +00001085 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001086
Michal Kalderoneeed0182014-08-17 16:47:44 +03001087 /* Check if this packet was timestamped */
Yuval Mintz56daf662014-08-28 08:07:32 +03001088 if (unlikely(cqe->fast_path_cqe.type_error_flags &
Michal Kalderoneeed0182014-08-17 16:47:44 +03001089 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1090 bnx2x_set_rx_ts(bp, skb);
1091
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001092 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1093 PARSING_FLAGS_VLAN)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001094 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001095 le16_to_cpu(cqe_fp->vlan_tag));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001096
Eliezer Tamir8b80cda2013-07-10 17:13:26 +03001097 skb_mark_napi_id(skb, &fp->napi);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001098
1099 if (bnx2x_fp_ll_polling(fp))
1100 netif_receive_skb(skb);
1101 else
1102 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001103next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +00001104 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001105
1106 bd_cons = NEXT_RX_IDX(bd_cons);
1107 bd_prod = NEXT_RX_IDX(bd_prod);
1108 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1109 rx_pkt++;
1110next_cqe:
1111 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1112 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1113
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001114 /* mark CQE as free */
1115 BNX2X_SEED_CQE(cqe_fp);
1116
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001117 if (rx_pkt == budget)
1118 break;
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001119
1120 comp_ring_cons = RCQ_BD(sw_comp_cons);
1121 cqe = &fp->rx_comp_ring[comp_ring_cons];
1122 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001123 } /* while */
1124
1125 fp->rx_bd_cons = bd_cons;
1126 fp->rx_bd_prod = bd_prod_fw;
1127 fp->rx_comp_cons = sw_comp_cons;
1128 fp->rx_comp_prod = sw_comp_prod;
1129
1130 /* Update producers */
1131 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1132 fp->rx_sge_prod);
1133
1134 fp->rx_pkt += rx_pkt;
1135 fp->rx_calls++;
1136
1137 return rx_pkt;
1138}
1139
1140static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1141{
1142 struct bnx2x_fastpath *fp = fp_cookie;
1143 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001144 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001145
Merav Sicron51c1a582012-03-18 10:33:38 +00001146 DP(NETIF_MSG_INTR,
1147 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001148 fp->index, fp->fw_sb_id, fp->igu_sb_id);
Yuval Mintzecf01c22013-04-22 02:53:03 +00001149
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001151
1152#ifdef BNX2X_STOP_ON_ERROR
1153 if (unlikely(bp->panic))
1154 return IRQ_HANDLED;
1155#endif
1156
1157 /* Handle Rx and Tx according to MSI-X vector */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001158 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00001159 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001160
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001161 prefetch(&fp->sb_running_index[SM_RX_ID]);
Eric Dumazetf5fbf112014-10-29 17:07:50 -07001162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001163
1164 return IRQ_HANDLED;
1165}
1166
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001167/* HW Lock for shared dual port PHYs */
1168void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1169{
1170 mutex_lock(&bp->port.phy_mutex);
1171
Yaniv Rosner8203c4b2012-11-27 03:46:33 +00001172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001173}
1174
1175void bnx2x_release_phy_lock(struct bnx2x *bp)
1176{
Yaniv Rosner8203c4b2012-11-27 03:46:33 +00001177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001178
1179 mutex_unlock(&bp->port.phy_mutex);
1180}
1181
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001182/* calculates MF speed according to current linespeed and MF configuration */
1183u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1184{
1185 u16 line_speed = bp->link_vars.line_speed;
1186 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001187 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1188 bp->mf_config[BP_VN(bp)]);
1189
1190 /* Calculate the current MAX line speed limit for the MF
1191 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001192 */
Yuval Mintzda3cc2d2015-08-17 08:28:25 +03001193 if (IS_MF_PERCENT_BW(bp))
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001194 line_speed = (line_speed * maxCfg) / 100;
1195 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001196 u16 vn_max_rate = maxCfg * 100;
1197
1198 if (vn_max_rate < line_speed)
1199 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001200 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001201 }
1202
1203 return line_speed;
1204}
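/* Worked example (illustrative numbers, not taken from any board): with a
 * 10000 Mbps link and maxCfg extracted as 30, percent-BW mode reports
 * 10000 * 30 / 100 = 3000 Mbps, while SD mode treats maxCfg as units of
 * 100 Mbps and reports min(10000, 30 * 100) = 3000 Mbps.
 */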
1205
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001206/**
1207 * bnx2x_fill_report_data - fill link report data to report
1208 *
1209 * @bp: driver handle
1210 * @data: link state to update
1211 *
1212 * It uses non-atomic bit operations because it is called under the mutex.
1213 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001214static void bnx2x_fill_report_data(struct bnx2x *bp,
1215 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001216{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001217 memset(data, 0, sizeof(*data));
1218
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001219 if (IS_PF(bp)) {
1220 /* Fill the report data: effective line speed */
1221 data->line_speed = bnx2x_get_mf_speed(bp);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001222
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001223 /* Link is down */
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1225 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1226 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001227
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001228 if (!BNX2X_NUM_ETH_QUEUES(bp))
1229 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1230 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001231
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001232 /* Full DUPLEX */
1233 if (bp->link_vars.duplex == DUPLEX_FULL)
1234 __set_bit(BNX2X_LINK_REPORT_FD,
1235 &data->link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001236
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001237 /* Rx Flow Control is ON */
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1239 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1240 &data->link_report_flags);
1241
1242 /* Tx Flow Control is ON */
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1244 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1245 &data->link_report_flags);
1246 } else { /* VF */
1247 *data = bp->vf_link_vars;
1248 }
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001249}
1250
1251/**
1252 * bnx2x_link_report - report link status to OS.
1253 *
1254 * @bp: driver handle
1255 *
1256 * Calls the __bnx2x_link_report() under the same locking scheme
1257 * as a link/PHY state managing code to ensure a consistent link
1258 * reporting.
1259 */
1260
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001261void bnx2x_link_report(struct bnx2x *bp)
1262{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001263 bnx2x_acquire_phy_lock(bp);
1264 __bnx2x_link_report(bp);
1265 bnx2x_release_phy_lock(bp);
1266}
1267
1268/**
1269 * __bnx2x_link_report - report link status to OS.
1270 *
1271 * @bp: driver handle
1272 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001273 * Non-atomic implementation.
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001274 * Should be called under the phy_lock.
1275 */
1276void __bnx2x_link_report(struct bnx2x *bp)
1277{
1278 struct bnx2x_link_report_data cur_data;
1279
1280 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001281 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001282 bnx2x_read_mf_cfg(bp);
1283
1284 /* Read the current link report info */
1285 bnx2x_fill_report_data(bp, &cur_data);
1286
1287 /* Don't report link down or exactly the same link status twice */
1288 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1289 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 &bp->last_reported_link.link_report_flags) &&
1291 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1292 &cur_data.link_report_flags)))
1293 return;
1294
1295 bp->link_cnt++;
1296
1297	/* We are going to report new link parameters now -
1298 * remember the current data for the next time.
1299 */
1300 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1301
Dmitry Kravkov6495d152014-06-26 14:31:04 +03001302 /* propagate status to VFs */
1303 if (IS_PF(bp))
1304 bnx2x_iov_link_update(bp);
1305
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001306 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1307 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001308 netif_carrier_off(bp->dev);
1309 netdev_err(bp->dev, "NIC Link is Down\n");
1310 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001311 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001312 const char *duplex;
1313 const char *flow;
1314
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001315 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001316
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001317 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1318 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001319 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001320 else
Joe Perches94f05b02011-08-14 12:16:20 +00001321 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001322
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001323		/* Handle the FC at the end so that only these flags could
1324		 * possibly be set. This way we can easily check whether any FC
1325		 * is enabled.
1326 */
1327 if (cur_data.link_report_flags) {
1328 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1329 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001330 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1331 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001332 flow = "ON - receive & transmit";
1333 else
1334 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001335 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001336 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001337 }
Joe Perches94f05b02011-08-14 12:16:20 +00001338 } else {
1339 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001340 }
Joe Perches94f05b02011-08-14 12:16:20 +00001341 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1342 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001343 }
1344}
1345
Eric Dumazet1191cb82012-04-27 21:39:21 +00001346static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1347{
1348 int i;
1349
1350 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1351 struct eth_rx_sge *sge;
1352
1353 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1354 sge->addr_hi =
1355 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1356 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1357
1358 sge->addr_lo =
1359 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1360 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1361 }
1362}
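/* Layout note (inferred from the loop above): the last two elements of
 * every SGE page are reserved for chaining; the next-to-last one is
 * programmed with the physical address of the following page, and the
 * final page points back to the first, so the SGE pages form a ring.
 */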
1363
1364static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1365 struct bnx2x_fastpath *fp, int last)
1366{
1367 int i;
1368
1369 for (i = 0; i < last; i++) {
1370 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1371 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1372 u8 *data = first_buf->data;
1373
1374 if (data == NULL) {
1375 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1376 continue;
1377 }
1378 if (tpa_info->tpa_state == BNX2X_TPA_START)
1379 dma_unmap_single(&bp->pdev->dev,
1380 dma_unmap_addr(first_buf, mapping),
1381 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001382 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001383 first_buf->data = NULL;
1384 }
1385}
1386
Merav Sicron55c11942012-11-07 00:45:48 +00001387void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1388{
1389 int j;
1390
1391 for_each_rx_queue_cnic(bp, j) {
1392 struct bnx2x_fastpath *fp = &bp->fp[j];
1393
1394 fp->rx_bd_cons = 0;
1395
1396 /* Activate BD ring */
1397 /* Warning!
1398 * this will generate an interrupt (to the TSTORM)
1399 * must only be done after chip is initialized
1400 */
1401 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1402 fp->rx_sge_prod);
1403 }
1404}
1405
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001406void bnx2x_init_rx_rings(struct bnx2x *bp)
1407{
1408 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001409 u16 ring_prod;
1410 int i, j;
1411
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001412 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001413 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001414 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001415
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001416 DP(NETIF_MSG_IFUP,
1417 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1418
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001419 if (fp->mode != TPA_MODE_DISABLED) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001420 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001421 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001422 struct bnx2x_agg_info *tpa_info =
1423 &fp->tpa_info[i];
1424 struct sw_rx_bd *first_buf =
1425 &tpa_info->first_buf;
1426
Michal Schmidt996dedb2013-09-05 22:13:09 +02001427 first_buf->data =
1428 bnx2x_frag_alloc(fp, GFP_KERNEL);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001429 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001430 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1431 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001432 bnx2x_free_tpa_pool(bp, fp, i);
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001433 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001434 break;
1435 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001436 dma_unmap_addr_set(first_buf, mapping, 0);
1437 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001438 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001439
1440 /* "next page" elements initialization */
1441 bnx2x_set_next_page_sgl(fp);
1442
1443 /* set SGEs bit mask */
1444 bnx2x_init_sge_ring_bit_mask(fp);
1445
1446 /* Allocate SGEs and initialize the ring elements */
1447 for (i = 0, ring_prod = 0;
1448 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1449
Michal Schmidt996dedb2013-09-05 22:13:09 +02001450 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1451 GFP_KERNEL) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001452 BNX2X_ERR("was only able to allocate %d rx sges\n",
1453 i);
1454 BNX2X_ERR("disabling TPA for queue[%d]\n",
1455 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001456 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001457 bnx2x_free_rx_sge_range(bp, fp,
1458 ring_prod);
1459 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001460 MAX_AGG_QS(bp));
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001461 fp->mode = TPA_MODE_DISABLED;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001462 ring_prod = 0;
1463 break;
1464 }
1465 ring_prod = NEXT_SGE_IDX(ring_prod);
1466 }
1467
1468 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001469 }
1470 }
1471
Merav Sicron55c11942012-11-07 00:45:48 +00001472 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001473 struct bnx2x_fastpath *fp = &bp->fp[j];
1474
1475 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001476
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001477 /* Activate BD ring */
1478 /* Warning!
1479		 * this will generate an interrupt (to the TSTORM);
1480		 * it must only be done after the chip is initialized
1481 */
1482 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1483 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001484
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001485 if (j != 0)
1486 continue;
1487
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001488 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001489 REG_WR(bp, BAR_USTRORM_INTMEM +
1490 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1491 U64_LO(fp->rx_comp_mapping));
1492 REG_WR(bp, BAR_USTRORM_INTMEM +
1493 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1494 U64_HI(fp->rx_comp_mapping));
1495 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001496 }
1497}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001498
Merav Sicron55c11942012-11-07 00:45:48 +00001499static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1500{
1501 u8 cos;
1502 struct bnx2x *bp = fp->bp;
1503
1504 for_each_cos_in_tx_queue(fp, cos) {
1505 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1506 unsigned pkts_compl = 0, bytes_compl = 0;
1507
1508 u16 sw_prod = txdata->tx_pkt_prod;
1509 u16 sw_cons = txdata->tx_pkt_cons;
1510
1511 while (sw_cons != sw_prod) {
1512 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1513 &pkts_compl, &bytes_compl);
1514 sw_cons++;
1515 }
1516
1517 netdev_tx_reset_queue(
1518 netdev_get_tx_queue(bp->dev,
1519 txdata->txq_index));
1520 }
1521}
1522
1523static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1524{
1525 int i;
1526
1527 for_each_tx_queue_cnic(bp, i) {
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529 }
1530}
1531
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001532static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1533{
1534 int i;
1535
Merav Sicron55c11942012-11-07 00:45:48 +00001536 for_each_eth_queue(bp, i) {
1537 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001538 }
1539}
1540
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001541static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1542{
1543 struct bnx2x *bp = fp->bp;
1544 int i;
1545
1546 /* ring wasn't allocated */
1547 if (fp->rx_buf_ring == NULL)
1548 return;
1549
1550 for (i = 0; i < NUM_RX_BD; i++) {
1551 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001552 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001553
Eric Dumazete52fcb22011-11-14 06:05:34 +00001554 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001555 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001556 dma_unmap_single(&bp->pdev->dev,
1557 dma_unmap_addr(rx_buf, mapping),
1558 fp->rx_buf_size, DMA_FROM_DEVICE);
1559
Eric Dumazete52fcb22011-11-14 06:05:34 +00001560 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001561 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001562 }
1563}
1564
Merav Sicron55c11942012-11-07 00:45:48 +00001565static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1566{
1567 int j;
1568
1569 for_each_rx_queue_cnic(bp, j) {
1570 bnx2x_free_rx_bds(&bp->fp[j]);
1571 }
1572}
1573
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001574static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1575{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001576 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001577
Merav Sicron55c11942012-11-07 00:45:48 +00001578 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001579 struct bnx2x_fastpath *fp = &bp->fp[j];
1580
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001581 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001582
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02001583 if (fp->mode != TPA_MODE_DISABLED)
David S. Miller8decf862011-09-22 03:23:13 -04001584 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001585 }
1586}
1587
stephen hemmingera8f47eb2014-01-09 22:20:11 -08001588static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00001589{
1590 bnx2x_free_tx_skbs_cnic(bp);
1591 bnx2x_free_rx_skbs_cnic(bp);
1592}
1593
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001594void bnx2x_free_skbs(struct bnx2x *bp)
1595{
1596 bnx2x_free_tx_skbs(bp);
1597 bnx2x_free_rx_skbs(bp);
1598}
1599
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001600void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1601{
1602 /* load old values */
1603 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1604
1605 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1606 /* leave all but MAX value */
1607 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1608
1609 /* set new MAX value */
1610 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1611 & FUNC_MF_CFG_MAX_BW_MASK;
1612
1613 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1614 }
1615}
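/* The update above amounts to the single expression below (shown only as
 * a consolidated sketch of the bit manipulation):
 *
 *	mf_cfg = (mf_cfg & ~FUNC_MF_CFG_MAX_BW_MASK) |
 *		 ((value << FUNC_MF_CFG_MAX_BW_SHIFT) &
 *		  FUNC_MF_CFG_MAX_BW_MASK);
 *
 * The result is then handed to the MCP via DRV_MSG_CODE_SET_MF_BW.
 */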
1616
Dmitry Kravkovca924292011-06-14 01:33:08 +00001617/**
1618 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1619 *
1620 * @bp: driver handle
1621 * @nvecs: number of vectors to be released
1622 */
1623static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001624{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001625 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001626
Dmitry Kravkovca924292011-06-14 01:33:08 +00001627 if (nvecs == offset)
1628 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001629
1630 /* VFs don't have a default SB */
1631 if (IS_PF(bp)) {
1632 free_irq(bp->msix_table[offset].vector, bp->dev);
1633 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1634 bp->msix_table[offset].vector);
1635 offset++;
1636 }
Merav Sicron55c11942012-11-07 00:45:48 +00001637
1638 if (CNIC_SUPPORT(bp)) {
1639 if (nvecs == offset)
1640 return;
1641 offset++;
1642 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001643
Dmitry Kravkovca924292011-06-14 01:33:08 +00001644 for_each_eth_queue(bp, i) {
1645 if (nvecs == offset)
1646 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001647 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1648 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001649
1650 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001651 }
1652}
1653
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001654void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001655{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001656 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001657 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1658 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1659
1660 /* vfs don't have a default status block */
1661 if (IS_PF(bp))
1662 nvecs++;
1663
1664 bnx2x_free_msix_irqs(bp, nvecs);
1665 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001666 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001667 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001668}
1669
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001670int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001671{
Ariel Elior1ab44342013-01-01 05:22:23 +00001672 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001673
Ariel Elior1ab44342013-01-01 05:22:23 +00001674 /* VFs don't have a default status block */
1675 if (IS_PF(bp)) {
1676 bp->msix_table[msix_vec].entry = msix_vec;
1677 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1678 bp->msix_table[0].entry);
1679 msix_vec++;
1680 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001681
Merav Sicron55c11942012-11-07 00:45:48 +00001682 /* Cnic requires an msix vector for itself */
1683 if (CNIC_SUPPORT(bp)) {
1684 bp->msix_table[msix_vec].entry = msix_vec;
1685 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1686 msix_vec, bp->msix_table[msix_vec].entry);
1687 msix_vec++;
1688 }
1689
Ariel Elior6383c0b2011-07-14 08:31:57 +00001690 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001691 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001692 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001693 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1694 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001695 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001696 }
1697
Ariel Elior1ab44342013-01-01 05:22:23 +00001698 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1699 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001700
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001701 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1702 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001703 /*
1704 * reconfigure number of tx/rx queues according to available
1705 * MSI-X vectors
1706 */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001707 if (rc == -ENOSPC) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001708 /* Get by with single vector */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1710 if (rc < 0) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001711 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1712 rc);
1713 goto no_msix;
1714 }
1715
1716 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1717 bp->flags |= USING_SINGLE_MSIX_FLAG;
1718
Merav Sicron55c11942012-11-07 00:45:48 +00001719 BNX2X_DEV_INFO("set number of queues to 1\n");
1720 bp->num_ethernet_queues = 1;
1721 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001722 } else if (rc < 0) {
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001723 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001724 goto no_msix;
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001725 } else if (rc < msix_vec) {
1726		/* how many fewer vectors will we have? */
1727 int diff = msix_vec - rc;
1728
1729 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1730
1731 /*
1732 * decrease number of queues by number of unallocated entries
1733 */
1734 bp->num_ethernet_queues -= diff;
1735 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1736
1737 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1738 bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001739 }
1740
1741 bp->flags |= USING_MSIX_FLAG;
1742
1743 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001744
1745no_msix:
1746 /* fall to INTx if not enough memory */
1747 if (rc == -ENOMEM)
1748 bp->flags |= DISABLE_MSI_FLAG;
1749
1750 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001751}
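/* Resulting MSI-X table layout for a PF with CNIC support (as built
 * above): entry 0 is the slowpath/default SB vector, entry 1 is reserved
 * for CNIC and entries 2..2+N-1 serve the N ethernet fastpath queues.
 * A VF has no default SB, so its table starts with the optional CNIC
 * vector followed directly by the fastpath vectors.
 */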
1752
1753static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1754{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001755 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001756
Ariel Eliorad5afc82013-01-01 05:22:26 +00001757 /* no default status block for vf */
1758 if (IS_PF(bp)) {
1759 rc = request_irq(bp->msix_table[offset++].vector,
1760 bnx2x_msix_sp_int, 0,
1761 bp->dev->name, bp->dev);
1762 if (rc) {
1763 BNX2X_ERR("request sp irq failed\n");
1764 return -EBUSY;
1765 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001766 }
1767
Merav Sicron55c11942012-11-07 00:45:48 +00001768 if (CNIC_SUPPORT(bp))
1769 offset++;
1770
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001771 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001772 struct bnx2x_fastpath *fp = &bp->fp[i];
1773 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1774 bp->dev->name, i);
1775
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001776 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001777 bnx2x_msix_fp_int, 0, fp->name, fp);
1778 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001779 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1780 bp->msix_table[offset].vector, rc);
1781 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001782 return -EBUSY;
1783 }
1784
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001785 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001786 }
1787
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001788 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001789 if (IS_PF(bp)) {
1790 offset = 1 + CNIC_SUPPORT(bp);
1791 netdev_info(bp->dev,
1792 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1793 bp->msix_table[0].vector,
1794 0, bp->msix_table[offset].vector,
1795 i - 1, bp->msix_table[offset + i - 1].vector);
1796 } else {
1797 offset = CNIC_SUPPORT(bp);
1798 netdev_info(bp->dev,
1799 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1800 0, bp->msix_table[offset].vector,
1801 i - 1, bp->msix_table[offset + i - 1].vector);
1802 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001803 return 0;
1804}
1805
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001806int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001807{
1808 int rc;
1809
1810 rc = pci_enable_msi(bp->pdev);
1811 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001812 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001813 return -1;
1814 }
1815 bp->flags |= USING_MSI_FLAG;
1816
1817 return 0;
1818}
1819
1820static int bnx2x_req_irq(struct bnx2x *bp)
1821{
1822 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001823 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001824
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001825 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001826 flags = 0;
1827 else
1828 flags = IRQF_SHARED;
1829
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001830 if (bp->flags & USING_MSIX_FLAG)
1831 irq = bp->msix_table[0].vector;
1832 else
1833 irq = bp->pdev->irq;
1834
1835 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001836}
1837
Yuval Mintzc957d092013-06-25 08:50:11 +03001838static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001839{
1840 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001841 if (bp->flags & USING_MSIX_FLAG &&
1842 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001843 rc = bnx2x_req_msix_irqs(bp);
1844 if (rc)
1845 return rc;
1846 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001847 rc = bnx2x_req_irq(bp);
1848 if (rc) {
1849 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1850 return rc;
1851 }
1852 if (bp->flags & USING_MSI_FLAG) {
1853 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001854 netdev_info(bp->dev, "using MSI IRQ %d\n",
1855 bp->dev->irq);
1856 }
1857 if (bp->flags & USING_MSIX_FLAG) {
1858 bp->dev->irq = bp->msix_table[0].vector;
1859 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1860 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001861 }
1862 }
1863
1864 return 0;
1865}
1866
Merav Sicron55c11942012-11-07 00:45:48 +00001867static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1868{
1869 int i;
1870
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001871 for_each_rx_queue_cnic(bp, i) {
Eric Dumazet074975d2015-04-14 18:45:00 -07001872 bnx2x_fp_busy_poll_init(&bp->fp[i]);
Merav Sicron55c11942012-11-07 00:45:48 +00001873 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001874 }
Merav Sicron55c11942012-11-07 00:45:48 +00001875}
1876
Eric Dumazet1191cb82012-04-27 21:39:21 +00001877static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001878{
1879 int i;
1880
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001881 for_each_eth_queue(bp, i) {
Eric Dumazet074975d2015-04-14 18:45:00 -07001882 bnx2x_fp_busy_poll_init(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001883 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001884 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001885}
1886
Merav Sicron55c11942012-11-07 00:45:48 +00001887static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1888{
1889 int i;
1890
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001891 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001892 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001893 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1894 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001895 }
Merav Sicron55c11942012-11-07 00:45:48 +00001896}
1897
Eric Dumazet1191cb82012-04-27 21:39:21 +00001898static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001899{
1900 int i;
1901
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001902 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001903 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001904 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1905 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001906 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001907}
1908
1909void bnx2x_netif_start(struct bnx2x *bp)
1910{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001911 if (netif_running(bp->dev)) {
1912 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001913 if (CNIC_LOADED(bp))
1914 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001915 bnx2x_int_enable(bp);
1916 if (bp->state == BNX2X_STATE_OPEN)
1917 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001918 }
1919}
1920
1921void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1922{
1923 bnx2x_int_disable_sync(bp, disable_hw);
1924 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001925 if (CNIC_LOADED(bp))
1926 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001927}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001928
Jason Wangf663dd92014-01-10 16:18:26 +08001929u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001930 void *accel_priv, select_queue_fallback_t fallback)
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001931{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001932 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001933
Merav Sicron55c11942012-11-07 00:45:48 +00001934 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001935 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1936 u16 ether_type = ntohs(hdr->h_proto);
1937
1938 /* Skip VLAN tag if present */
1939 if (ether_type == ETH_P_8021Q) {
1940 struct vlan_ethhdr *vhdr =
1941 (struct vlan_ethhdr *)skb->data;
1942
1943 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1944 }
1945
1946 /* If ethertype is FCoE or FIP - use FCoE ring */
1947 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001948 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001949 }
Merav Sicron55c11942012-11-07 00:45:48 +00001950
David S. Miller823dcd22011-08-20 10:39:12 -07001951 /* select a non-FCoE queue */
Daniel Borkmann99932d42014-02-16 15:55:20 +01001952 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001953}
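/* Selection summary (as implemented above): FCoE/FIP frames are steered
 * to the dedicated FCoE tx ring when CNIC is loaded, a VLAN tag is
 * skipped before the ethertype check, and all other traffic falls back to
 * the stack's default hash modulo the number of ethernet queues.
 */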
1954
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001955void bnx2x_set_num_queues(struct bnx2x *bp)
1956{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001957 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001958 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001959
Barak Witkowskia3348722012-04-23 03:04:46 +00001960 /* override in STORAGE SD modes */
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03001961 if (IS_MF_STORAGE_ONLY(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001962 bp->num_ethernet_queues = 1;
1963
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001964 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001965 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1966 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001967
1968 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001969}
1970
David S. Miller823dcd22011-08-20 10:39:12 -07001971/**
1972 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1973 *
1974 * @bp: Driver handle
1975 *
1976 * We currently support at most 16 Tx queues for each CoS, thus we will
1977 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1978 * bp->max_cos.
1979 *
1980 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1981 * index after all ETH L2 indices.
1982 *
1983 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1984 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001985 * 16..31,...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001986 *
1987 * The proper configuration of skb->queue_mapping is handled by
1988 * bnx2x_select_queue() and __skb_tx_hash().
1989 *
1990 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1991 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1992 */
Merav Sicron55c11942012-11-07 00:45:48 +00001993static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001994{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001995 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001996
Merav Sicron65565882012-06-19 07:48:26 +00001997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001998 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001999
2000/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00002001 if (include_cnic && !NO_FCOE(bp)) {
2002 rx++;
2003 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002004 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002005
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2007 if (rc) {
2008 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2009 return rc;
2010 }
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2012 if (rc) {
2013 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2014 return rc;
2015 }
2016
Merav Sicron51c1a582012-03-18 10:33:38 +00002017 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002018 tx, rx);
2019
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002020 return rc;
2021}
2022
Eric Dumazet1191cb82012-04-27 21:39:21 +00002023static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002024{
2025 int i;
2026
2027 for_each_queue(bp, i) {
2028 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00002029 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002030
2031 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2032 if (IS_FCOE_IDX(i))
2033 /*
2034			 * Although there are no IP frames expected to arrive on
2035			 * this ring, we still want to add an
2036 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2037 * overrun attack.
2038 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00002039 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002040 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00002041 mtu = bp->dev->mtu;
2042 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2043 IP_HEADER_ALIGNMENT_PADDING +
2044 ETH_OVREHEAD +
2045 mtu +
2046 BNX2X_FW_RX_ALIGN_END;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002047 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00002048 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2049 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2050 else
2051 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002052 }
2053}
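/* Sizing note (macro values intentionally not spelled out here):
 * rx_buf_size is the device MTU plus the L2 overhead, the IP header
 * alignment padding and the FW start/end alignment margins. When that
 * total plus NET_SKB_PAD still fits in one page, the page-frag allocator
 * is used (rx_frag_size != 0); otherwise rx_frag_size stays 0 and
 * bnx2x_frag_alloc() falls back to a plain kernel allocation.
 */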
2054
Ariel Elior60cad4e2013-09-04 14:09:22 +03002055static int bnx2x_init_rss(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002056{
2057 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002058 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2059
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002060 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002061 * enabled
2062 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002063 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2064 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00002065 bp->fp->cl_id +
2066 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002067
2068 /*
2069 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2070	 * per-port, so if explicit configuration is needed, do it only
2071 * for a PMF.
2072 *
2073 * For 57712 and newer on the other hand it's a per-function
2074 * configuration.
2075 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002076 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002077}
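/* Illustration (assuming four ethernet queues): the initial indirection
 * table simply cycles through the client ids, i.e. entry i is set to
 * bp->fp->cl_id + (i % 4), which is what ethtool_rxfh_indir_default()
 * yields. The table can later be changed through the ethtool RXFH
 * interface.
 */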
2078
Ariel Elior60cad4e2013-09-04 14:09:22 +03002079int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2080 bool config_hash, bool enable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002081{
Yuval Mintz3b603062012-03-18 10:33:39 +00002082 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002083
2084 /* Although RSS is meaningless when there is a single HW queue we
2085 * still need it enabled in order to have HW Rx hash generated.
2086 *
2087 * if (!is_eth_multi(bp))
2088 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2089 */
2090
Dmitry Kravkov96305232012-04-03 18:41:30 +00002091 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002092
2093 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2094
Ariel Elior60cad4e2013-09-04 14:09:22 +03002095 if (enable) {
2096 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002097
Ariel Elior60cad4e2013-09-04 14:09:22 +03002098 /* RSS configuration */
2099 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2100 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2101 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2102 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2103 if (rss_obj->udp_rss_v4)
2104 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2105 if (rss_obj->udp_rss_v6)
2106 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Dmitry Kravkove42780b2014-08-17 16:47:43 +03002107
Yuval Mintz28311f82015-07-22 09:16:22 +03002108 if (!CHIP_IS_E1x(bp)) {
2109 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2110 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2111 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2112
Dmitry Kravkove42780b2014-08-17 16:47:43 +03002113 /* valid only for TUNN_MODE_GRE tunnel mode */
Yuval Mintz28311f82015-07-22 09:16:22 +03002114 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2115 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002116 } else {
2117 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2118 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002119
Dmitry Kravkov96305232012-04-03 18:41:30 +00002120 /* Hash bits */
2121 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002122
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002123 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002124
Dmitry Kravkov96305232012-04-03 18:41:30 +00002125 if (config_hash) {
2126 /* RSS keys */
Eric Dumazete3ec69c2014-11-16 06:23:07 -08002127 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
Dmitry Kravkov96305232012-04-03 18:41:30 +00002128 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002129 }
2130
Ariel Elior60cad4e2013-09-04 14:09:22 +03002131 if (IS_PF(bp))
2132 return bnx2x_config_rss(bp, &params);
2133 else
2134 return bnx2x_vfpf_config_rss(bp, &params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002135}
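/* Usage note (derived from the parameters above): callers pass
 * enable == false to park the engine in BNX2X_RSS_MODE_DISABLED, and
 * config_hash == true whenever the RSS key should be (re)written to the
 * searcher; on a VF the request is forwarded to the PF over the VF-PF
 * channel instead of being applied directly.
 */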
2136
Eric Dumazet1191cb82012-04-27 21:39:21 +00002137static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002138{
Yuval Mintz3b603062012-03-18 10:33:39 +00002139 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002140
2141 /* Prepare parameters for function state transitions */
2142 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2143
2144 func_params.f_obj = &bp->func_obj;
2145 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2146
2147 func_params.params.hw_init.load_phase = load_code;
2148
2149 return bnx2x_func_state_change(bp, &func_params);
2150}
2151
2152/*
2153 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002154 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002155 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002156void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002157{
2158 int rc;
2159 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002160 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002161 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002162
2163 /***************** Cleanup MACs' object first *************************/
2164
2165 /* Wait for completion of requested */
2166 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2167 /* Perform a dry cleanup */
2168 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2169
2170 /* Clean ETH primary MAC */
2171 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002172 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002173 &ramrod_flags);
2174 if (rc != 0)
2175 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2176
2177 /* Cleanup UC list */
2178 vlan_mac_flags = 0;
2179 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2180 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2181 &ramrod_flags);
2182 if (rc != 0)
2183 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2184
2185 /***************** Now clean mcast object *****************************/
2186 rparam.mcast_obj = &bp->mcast_obj;
2187 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2188
Yuval Mintz8b09be52013-08-01 17:30:59 +03002189 /* Add a DEL command... - Since we're doing a driver cleanup only,
2190 * we take a lock surrounding both the initial send and the CONTs,
2191 * as we don't want a true completion to disrupt us in the middle.
2192 */
2193 netif_addr_lock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002194 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2195 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002196 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2197 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002198
2199 /* ...and wait until all pending commands are cleared */
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2201 while (rc != 0) {
2202 if (rc < 0) {
2203 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2204 rc);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002205 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002206 return;
2207 }
2208
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2210 }
Yuval Mintz8b09be52013-08-01 17:30:59 +03002211 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002212}
2213
2214#ifndef BNX2X_STOP_ON_ERROR
2215#define LOAD_ERROR_EXIT(bp, label) \
2216 do { \
2217 (bp)->state = BNX2X_STATE_ERROR; \
2218 goto label; \
2219 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002220
2221#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2222 do { \
2223 bp->cnic_loaded = false; \
2224 goto label; \
2225 } while (0)
2226#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002227#define LOAD_ERROR_EXIT(bp, label) \
2228 do { \
2229 (bp)->state = BNX2X_STATE_ERROR; \
2230 (bp)->panic = 1; \
2231 return -EBUSY; \
2232 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002233#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2234 do { \
2235 bp->cnic_loaded = false; \
2236 (bp)->panic = 1; \
2237 return -EBUSY; \
2238 } while (0)
2239#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002240
Ariel Eliorad5afc82013-01-01 05:22:26 +00002241static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002242{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002243 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2244 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2245 return;
2246}
Yuval Mintz452427b2012-03-26 20:47:07 +00002247
Ariel Eliorad5afc82013-01-01 05:22:26 +00002248static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2249{
Ariel Elior8db573b2013-01-01 05:22:37 +00002250 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002251 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002252
Ariel Eliorad5afc82013-01-01 05:22:26 +00002253 /* number of queues for statistics is number of eth queues + FCoE */
2254 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002255
Ariel Eliorad5afc82013-01-01 05:22:26 +00002256 /* Total number of FW statistics requests =
2257 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2258 * and fcoe l2 queue) stats + num of queues (which includes another 1
2259 * for fcoe l2 queue if applicable)
2260 */
2261 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2262
Ariel Elior8db573b2013-01-01 05:22:37 +00002263 /* vf stats appear in the request list, but their data is allocated by
2264 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2265 * it is used to determine where to place the vf stats queries in the
2266 * request struct
2267 */
2268 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002269 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002270
Ariel Eliorad5afc82013-01-01 05:22:26 +00002271 /* Request is built from stats_query_header and an array of
2272 * stats_query_cmd_group each of which contains
2273 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2274 * configured in the stats_query_header.
2275 */
2276 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002277 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2278 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002279 1 : 0));
2280
Ariel Elior8db573b2013-01-01 05:22:37 +00002281 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2282 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002283 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2284 num_groups * sizeof(struct stats_query_cmd_group);
2285
2286 /* Data for statistics requests + stats_counter
2287 * stats_counter holds per-STORM counters that are incremented
2288 * when STORM has finished with the current request.
2289	 * Memory for FCoE offloaded statistics is counted anyway,
2290 * even if they will not be sent.
2291 * VF stats are not accounted for here as the data of VF stats is stored
2292 * in memory allocated by the VF, not here.
2293 */
2294 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2295 sizeof(struct per_pf_stats) +
2296 sizeof(struct fcoe_statistics_params) +
2297 sizeof(struct per_queue_stats) * num_queue_stats +
2298 sizeof(struct stats_counter);
2299
Joe Perchescd2b0382014-02-20 13:25:51 -08002300 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2301 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2302 if (!bp->fw_stats)
2303 goto alloc_mem_err;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002304
2305 /* Set shortcuts */
2306 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2307 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2308 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2309 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2310 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2311 bp->fw_stats_req_sz;
2312
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002313 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002314 U64_HI(bp->fw_stats_req_mapping),
2315 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002316 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002317 U64_HI(bp->fw_stats_data_mapping),
2318 U64_LO(bp->fw_stats_data_mapping));
2319 return 0;
2320
2321alloc_mem_err:
2322 bnx2x_free_fw_stats_mem(bp);
2323 BNX2X_ERR("Can't allocate FW stats memory\n");
2324 return -ENOMEM;
2325}
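/* Sizing sketch (assuming STATS_QUERY_CMD_COUNT is 16): num_groups is a
 * ceiling division of the request count, so 10 stats clients with no VF
 * headroom fit in a single stats_query_cmd_group while 17 would need two.
 */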
2326
2327/* send load request to mcp and analyze response */
2328static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2329{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002330 u32 param;
2331
Ariel Eliorad5afc82013-01-01 05:22:26 +00002332 /* init fw_seq */
2333 bp->fw_seq =
2334 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2335 DRV_MSG_SEQ_NUMBER_MASK);
2336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2337
2338 /* Get current FW pulse sequence */
2339 bp->fw_drv_pulse_wr_seq =
2340 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2341 DRV_PULSE_SEQ_MASK);
2342 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2343
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002344 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2345
2346 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2347 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2348
Ariel Eliorad5afc82013-01-01 05:22:26 +00002349 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002350 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002351
2352 /* if mcp fails to respond we must abort */
2353 if (!(*load_code)) {
2354 BNX2X_ERR("MCP response failure, aborting\n");
2355 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002356 }
2357
Ariel Eliorad5afc82013-01-01 05:22:26 +00002358 /* If mcp refused (e.g. other port is in diagnostic mode) we
2359 * must abort
2360 */
2361 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2362 BNX2X_ERR("MCP refused load request, aborting\n");
2363 return -EBUSY;
2364 }
2365 return 0;
2366}
2367
2368/* check whether another PF has already loaded FW to chip. In
2369 * virtualized environments a pf from another VM may have already
2370 * initialized the device including loading FW
2371 */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002372int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002373{
2374 /* is another pf loaded on this engine? */
2375 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2376 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2377 /* build my FW version dword */
2378 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2379 (BCM_5710_FW_MINOR_VERSION << 8) +
2380 (BCM_5710_FW_REVISION_VERSION << 16) +
2381 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2382
2383 /* read loaded FW from chip */
2384 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2385
2386 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2387 loaded_fw, my_fw);
2388
2389 /* abort nic load if version mismatch */
2390 if (my_fw != loaded_fw) {
Yuval Mintz91ebb922013-12-26 09:57:07 +02002391 if (print_err)
2392 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2393 loaded_fw, my_fw);
2394 else
2395 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2396 loaded_fw, my_fw);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002397 return -EBUSY;
2398 }
2399 }
2400 return 0;
2401}
2402
2403/* returns the "mcp load_code" according to global load_count array */
2404static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2405{
2406 int path = BP_PATH(bp);
2407
2408 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002409 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2410 bnx2x_load_count[path][2]);
2411 bnx2x_load_count[path][0]++;
2412 bnx2x_load_count[path][1 + port]++;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002413 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002414 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2415 bnx2x_load_count[path][2]);
2416 if (bnx2x_load_count[path][0] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002417 return FW_MSG_CODE_DRV_LOAD_COMMON;
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002418 else if (bnx2x_load_count[path][1 + port] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002419 return FW_MSG_CODE_DRV_LOAD_PORT;
2420 else
2421 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2422}
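/* Without an MCP the load_count array mimics the MCP bookkeeping: index 0
 * counts loads on the whole path and index 1 + port counts loads on this
 * port, so the first function up on the path gets LOAD_COMMON, the first
 * one on the port gets LOAD_PORT and any later function gets
 * LOAD_FUNCTION.
 */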
2423
2424/* mark PMF if applicable */
2425static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2426{
2427 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2428 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2429 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2430 bp->port.pmf = 1;
2431 /* We need the barrier to ensure the ordering between the
2432 * writing to bp->port.pmf here and reading it from the
2433 * bnx2x_periodic_task().
2434 */
2435 smp_mb();
2436 } else {
2437 bp->port.pmf = 0;
2438 }
2439
2440 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2441}
2442
2443static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2444{
2445 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2446 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2447 (bp->common.shmem2_base)) {
2448 if (SHMEM2_HAS(bp, dcc_support))
2449 SHMEM2_WR(bp, dcc_support,
2450 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2451 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2452 if (SHMEM2_HAS(bp, afex_driver_support))
2453 SHMEM2_WR(bp, afex_driver_support,
2454 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2455 }
2456
2457 /* Set AFEX default VLAN tag to an invalid value */
2458 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002459}
2460
Eric Dumazet1191cb82012-04-27 21:39:21 +00002461/**
2462 * bnx2x_bz_fp - zero content of the fastpath structure.
2463 *
2464 * @bp: driver handle
2465 * @index: fastpath index to be zeroed
2466 *
2467 * Makes sure the contents of the bp->fp[index].napi are kept
2468 * intact.
2469 */
2470static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2471{
2472 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002473 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002474 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002475 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002476
Eric Dumazet1191cb82012-04-27 21:39:21 +00002477 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002478 if (fp->tpa_info)
2479 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2480 sizeof(struct bnx2x_agg_info));
2481 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002482
2483 /* Restore the NAPI object as it has been already initialized */
2484 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002485 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002486 fp->bp = bp;
2487 fp->index = index;
2488 if (IS_ETH_FP(fp))
2489 fp->max_cos = bp->max_cos;
2490 else
2491 /* Special queues support only one CoS */
2492 fp->max_cos = 1;
2493
Merav Sicron65565882012-06-19 07:48:26 +00002494 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002495 if (IS_FCOE_FP(fp))
2496 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002497 if (IS_ETH_FP(fp))
2498 for_each_cos_in_tx_queue(fp, cos)
2499 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2500 BNX2X_NUM_ETH_QUEUES(bp) + index];
2501
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002502	/* Set the TPA mode for each queue. The TPA mode determines the queue's
Eric Dumazet1191cb82012-04-27 21:39:21 +00002503	 * minimal size, so it must be set prior to queue memory allocation.
2504 */
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002505 if (bp->dev->features & NETIF_F_LRO)
Eric Dumazet1191cb82012-04-27 21:39:21 +00002506 fp->mode = TPA_MODE_LRO;
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02002507 else if (bp->dev->features & NETIF_F_GRO &&
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002508 bnx2x_mtu_allows_gro(bp->dev->mtu))
Eric Dumazet1191cb82012-04-27 21:39:21 +00002509 fp->mode = TPA_MODE_GRO;
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002510 else
2511 fp->mode = TPA_MODE_DISABLED;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002512
Michal Schmidt22a8f232015-04-27 17:20:38 +02002513 /* We don't want TPA if it's disabled in bp
2514 * or if this is an FCoE L2 ring.
2515 */
2516 if (bp->disable_tpa || IS_FCOE_FP(fp))
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002517 fp->mode = TPA_MODE_DISABLED;
Merav Sicron55c11942012-11-07 00:45:48 +00002518}
2519
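/* Report the OS driver state to the management FW through shmem2. Only
 * relevant for PFs in BD multi-function mode whose shmem2 exposes the
 * os_driver_state field.
 */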
Yuval Mintz230d00e2015-07-22 09:16:25 +03002520void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2521{
2522 u32 cur;
2523
2524 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2525 return;
2526
2527 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2528 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2529 cur, state);
2530
2531 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2532}
2533
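/* Bring up the CNIC (iSCSI/FCoE offload) side of the driver: allocate CNIC
 * memory and fastpath rings, enable the CNIC NAPI contexts, set up the CNIC
 * queues and Rx filtering, and notify the CNIC module that it may start.
 */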
Merav Sicron55c11942012-11-07 00:45:48 +00002534int bnx2x_load_cnic(struct bnx2x *bp)
2535{
2536 int i, rc, port = BP_PORT(bp);
2537
2538 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2539
2540 mutex_init(&bp->cnic_mutex);
2541
Ariel Eliorad5afc82013-01-01 05:22:26 +00002542 if (IS_PF(bp)) {
2543 rc = bnx2x_alloc_mem_cnic(bp);
2544 if (rc) {
2545 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2546 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547 }
Merav Sicron55c11942012-11-07 00:45:48 +00002548 }
2549
2550 rc = bnx2x_alloc_fp_mem_cnic(bp);
2551 if (rc) {
2552 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2553 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2554 }
2555
2556 /* Update the number of queues with the cnic queues */
2557 rc = bnx2x_set_real_num_queues(bp, 1);
2558 if (rc) {
2559 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2560 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2561 }
2562
2563 /* Add all CNIC NAPI objects */
2564 bnx2x_add_all_napi_cnic(bp);
2565 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2566 bnx2x_napi_enable_cnic(bp);
2567
2568 rc = bnx2x_init_hw_func_cnic(bp);
2569 if (rc)
2570 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2571
2572 bnx2x_nic_init_cnic(bp);
2573
Ariel Eliorad5afc82013-01-01 05:22:26 +00002574 if (IS_PF(bp)) {
2575 /* Enable Timer scan */
2576 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002577
Ariel Eliorad5afc82013-01-01 05:22:26 +00002578 /* setup cnic queues */
2579 for_each_cnic_queue(bp, i) {
2580 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2581 if (rc) {
2582 BNX2X_ERR("Queue setup failed\n");
2583 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2584 }
Merav Sicron55c11942012-11-07 00:45:48 +00002585 }
2586 }
2587
2588 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002589 bnx2x_set_rx_mode_inner(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002590
2591 /* re-read iscsi info */
2592 bnx2x_get_iscsi_info(bp);
2593 bnx2x_setup_cnic_irq_info(bp);
2594 bnx2x_setup_cnic_info(bp);
2595 bp->cnic_loaded = true;
2596 if (bp->state == BNX2X_STATE_OPEN)
2597 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2598
Merav Sicron55c11942012-11-07 00:45:48 +00002599	DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2600
2601 return 0;
2602
2603#ifndef BNX2X_STOP_ON_ERROR
2604load_error_cnic2:
2605 /* Disable Timer scan */
2606 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2607
2608load_error_cnic1:
2609 bnx2x_napi_disable_cnic(bp);
2610 /* Update the number of queues without the cnic queues */
Yuval Mintzd9d81862013-09-23 10:12:53 +03002611 if (bnx2x_set_real_num_queues(bp, 0))
Merav Sicron55c11942012-11-07 00:45:48 +00002612 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2613load_error_cnic0:
2614 BNX2X_ERR("CNIC-related load failed\n");
2615 bnx2x_free_fp_mem_cnic(bp);
2616 bnx2x_free_mem_cnic(bp);
2617 return rc;
2618#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002619}
2620
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002621/* must be called with rtnl_lock */
2622int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2623{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002624 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002625 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002626
Merav Sicron55c11942012-11-07 00:45:48 +00002627 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2628 DP(NETIF_MSG_IFUP,
2629 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2630
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002631#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002632 if (unlikely(bp->panic)) {
2633 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002634 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002635 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002636#endif
2637
2638 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2639
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002640 /* zero the structure w/o any lock, before SP handler is initialized */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002641 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2642 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2643 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002644
Ariel Eliorad5afc82013-01-01 05:22:26 +00002645 if (IS_PF(bp))
2646 /* must be called before memory allocation and HW init */
2647 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002648
Ariel Elior6383c0b2011-07-14 08:31:57 +00002649 /*
 2650	 * Zero fastpath structures while preserving invariants that are allocated
 2651	 * only once: the napi object, fp index, max_cos and the bp pointer.
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02002652 * Also set fp->mode and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002653 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002654 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002655 for_each_queue(bp, i)
2656 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002657 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2658 bp->num_cnic_queues) *
2659 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002660
Merav Sicron55c11942012-11-07 00:45:48 +00002661 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002662
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002663 /* Set the receive queues buffer size */
2664 bnx2x_set_rx_buf_size(bp);
2665
Ariel Eliorad5afc82013-01-01 05:22:26 +00002666 if (IS_PF(bp)) {
2667 rc = bnx2x_alloc_mem(bp);
2668 if (rc) {
2669 BNX2X_ERR("Unable to allocate bp memory\n");
2670 return rc;
2671 }
2672 }
2673
Ariel Eliorad5afc82013-01-01 05:22:26 +00002674	/* Must be done after bnx2x_alloc_mem(), since it self-adjusts to the
 2675	 * amount of memory available for the RSS queues.
2676 */
2677 rc = bnx2x_alloc_fp_mem(bp);
2678 if (rc) {
2679 BNX2X_ERR("Unable to allocate memory for fps\n");
2680 LOAD_ERROR_EXIT(bp, load_error0);
2681 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002682
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002683	/* Allocate memory for FW statistics */
2684 if (bnx2x_alloc_fw_stats_mem(bp))
2685 LOAD_ERROR_EXIT(bp, load_error0);
2686
Ariel Elior8d9ac292013-01-01 05:22:27 +00002687 /* request pf to initialize status blocks */
2688 if (IS_VF(bp)) {
2689 rc = bnx2x_vfpf_init(bp);
2690 if (rc)
2691 LOAD_ERROR_EXIT(bp, load_error0);
2692 }
2693
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002694	/* Since bnx2x_alloc_mem() may update
 2695	 * bp->num_queues, bnx2x_set_real_num_queues() must always
Merav Sicron55c11942012-11-07 00:45:48 +00002696	 * come after it. At this stage the CNIC queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002697 */
Merav Sicron55c11942012-11-07 00:45:48 +00002698 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002699 if (rc) {
2700 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002701 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002702 }
2703
Ariel Elior6383c0b2011-07-14 08:31:57 +00002704	/* Configure multi-CoS mappings in the kernel.
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002705	 * This configuration may be overridden by a multi-class queue
 2706	 * discipline or by a DCBX negotiation result.
Ariel Elior6383c0b2011-07-14 08:31:57 +00002707 */
2708 bnx2x_setup_tc(bp->dev, bp->max_cos);
2709
Merav Sicron26614ba2012-08-27 03:26:19 +00002710 /* Add all NAPI objects */
2711 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002712 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002713 bnx2x_napi_enable(bp);
2714
Ariel Eliorad5afc82013-01-01 05:22:26 +00002715 if (IS_PF(bp)) {
2716 /* set pf load just before approaching the MCP */
2717 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002718
Ariel Eliorad5afc82013-01-01 05:22:26 +00002719 /* if mcp exists send load request and analyze response */
2720 if (!BP_NOMCP(bp)) {
2721 /* attempt to load pf */
2722 rc = bnx2x_nic_load_request(bp, &load_code);
2723 if (rc)
2724 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002725
Ariel Eliorad5afc82013-01-01 05:22:26 +00002726 /* what did mcp say? */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002727 rc = bnx2x_compare_fw_ver(bp, load_code, true);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002728 if (rc) {
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002730 LOAD_ERROR_EXIT(bp, load_error2);
2731 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002732 } else {
2733 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002734 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002735
Ariel Eliorad5afc82013-01-01 05:22:26 +00002736 /* mark pmf if applicable */
2737 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002738
Ariel Eliorad5afc82013-01-01 05:22:26 +00002739 /* Init Function state controlling object */
2740 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002741
Ariel Eliorad5afc82013-01-01 05:22:26 +00002742 /* Initialize HW */
2743 rc = bnx2x_init_hw(bp, load_code);
2744 if (rc) {
2745 BNX2X_ERR("HW init failed, aborting\n");
2746 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2747 LOAD_ERROR_EXIT(bp, load_error2);
2748 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002749 }
2750
Yuval Mintzecf01c22013-04-22 02:53:03 +00002751 bnx2x_pre_irq_nic_init(bp);
2752
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002753 /* Connect to IRQs */
2754 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002755 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002756 BNX2X_ERR("setup irqs failed\n");
2757 if (IS_PF(bp))
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002759 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002760 }
2761
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002762 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002763 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002764 /* Setup NIC internals and enable interrupts */
2765 bnx2x_post_irq_nic_init(bp, load_code);
2766
Ariel Eliorad5afc82013-01-01 05:22:26 +00002767 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002768 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002769
Ariel Eliorad5afc82013-01-01 05:22:26 +00002770 /* Set AFEX default VLAN tag to an invalid value */
2771 bp->afex_def_vlan_tag = -1;
2772 bnx2x_nic_load_afex_dcc(bp, load_code);
2773 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2774 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002775 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002776 BNX2X_ERR("Function start failed!\n");
2777 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2778
Merav Sicron55c11942012-11-07 00:45:48 +00002779 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002780 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002781
Ariel Eliorad5afc82013-01-01 05:22:26 +00002782 /* Send LOAD_DONE command to MCP */
2783 if (!BP_NOMCP(bp)) {
2784 load_code = bnx2x_fw_command(bp,
2785 DRV_MSG_CODE_LOAD_DONE, 0);
2786 if (!load_code) {
2787 BNX2X_ERR("MCP response failure, aborting\n");
2788 rc = -EBUSY;
2789 LOAD_ERROR_EXIT(bp, load_error3);
2790 }
2791 }
2792
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002793 /* initialize FW coalescing state machines in RAM */
2794 bnx2x_update_coalesce(bp);
Ariel Elior60cad4e2013-09-04 14:09:22 +03002795 }
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002796
Ariel Elior60cad4e2013-09-04 14:09:22 +03002797 /* setup the leading queue */
2798 rc = bnx2x_setup_leading(bp);
2799 if (rc) {
2800 BNX2X_ERR("Setup leading failed!\n");
2801 LOAD_ERROR_EXIT(bp, load_error3);
2802 }
2803
2804 /* set up the rest of the queues */
2805 for_each_nondefault_eth_queue(bp, i) {
2806 if (IS_PF(bp))
2807 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2808 else /* VF */
2809 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002810 if (rc) {
Ariel Elior60cad4e2013-09-04 14:09:22 +03002811 BNX2X_ERR("Queue %d setup failed\n", i);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002812 LOAD_ERROR_EXIT(bp, load_error3);
2813 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002814 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002815
Ariel Elior60cad4e2013-09-04 14:09:22 +03002816 /* setup rss */
2817 rc = bnx2x_init_rss(bp);
2818 if (rc) {
2819 BNX2X_ERR("PF RSS init failed\n");
2820 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002821 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002822
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002823 /* Now when Clients are configured we are ready to work */
2824 bp->state = BNX2X_STATE_OPEN;
2825
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002826 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002827 if (IS_PF(bp))
2828 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002829 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002830 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2831 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002832 if (rc) {
2833 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002834 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002835 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002836
Ariel Eliorad5afc82013-01-01 05:22:26 +00002837 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002838 bnx2x_update_max_mf_config(bp, bp->pending_max);
2839 bp->pending_max = 0;
2840 }
2841
Ariel Eliorad5afc82013-01-01 05:22:26 +00002842 if (bp->port.pmf) {
2843 rc = bnx2x_initial_phy_init(bp, load_mode);
2844 if (rc)
2845 LOAD_ERROR_EXIT(bp, load_error3);
2846 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002847 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002848
2849 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002850
Yuval Mintz05cc5a32015-07-29 15:52:46 +03002851 /* Re-configure vlan filters */
2852 rc = bnx2x_vlan_reconfigure_vid(bp);
2853 if (rc)
2854 LOAD_ERROR_EXIT(bp, load_error3);
2855
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002856 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002857 bnx2x_set_rx_mode_inner(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002858
Michal Kalderoneeed0182014-08-17 16:47:44 +03002859 if (bp->flags & PTP_SUPPORTED) {
2860 bnx2x_init_ptp(bp);
2861 bnx2x_configure_ptp_filters(bp);
2862 }
2863 /* Start Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864 switch (load_mode) {
2865 case LOAD_NORMAL:
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002866 /* Tx queue should be only re-enabled */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002867 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002868 break;
2869
2870 case LOAD_OPEN:
2871 netif_tx_start_all_queues(bp->dev);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002872 smp_mb__after_atomic();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002873 break;
2874
2875 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002876 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002877 bp->state = BNX2X_STATE_DIAG;
2878 break;
2879
2880 default:
2881 break;
2882 }
2883
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002884 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002885 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002886 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002887 bnx2x__link_status_update(bp);
2888
2889 /* start the timer */
2890 mod_timer(&bp->timer, jiffies + bp->current_interval);
2891
Merav Sicron55c11942012-11-07 00:45:48 +00002892 if (CNIC_ENABLED(bp))
2893 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002894
Yuval Mintz42f82772014-03-23 18:12:23 +02002895 if (IS_PF(bp))
2896 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2897
Ariel Eliorad5afc82013-01-01 05:22:26 +00002898 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2899 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002900 u32 val;
2901 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
Yuval Mintz230d00e2015-07-22 09:16:25 +03002902 val &= ~DRV_FLAGS_MTU_MASK;
2903 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002904 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2905 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2906 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2907 }
2908
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002909 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002910 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002911 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002912 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002913 return -EBUSY;
2914 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002915
Yuval Mintzc48f3502015-07-22 09:16:26 +03002916 /* Update driver data for On-Chip MFW dump. */
2917 if (IS_PF(bp))
2918 bnx2x_update_mfw_dump(bp);
2919
Barak Witkowski98768792012-06-19 07:48:31 +00002920 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2921 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2922 bnx2x_dcbx_init(bp, false);
2923
Yuval Mintz230d00e2015-07-22 09:16:25 +03002924 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2925 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2926
Merav Sicron55c11942012-11-07 00:45:48 +00002927	DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2928
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002929 return 0;
2930
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002931#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002932load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002933 if (IS_PF(bp)) {
2934 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002935
Ariel Eliorad5afc82013-01-01 05:22:26 +00002936 /* Clean queueable objects */
2937 bnx2x_squeeze_objects(bp);
2938 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002939
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002940 /* Free SKBs, SGEs, TPA pool and driver internals */
2941 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002942 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002943 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002944
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002945 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002946 bnx2x_free_irq(bp);
2947load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002948 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002949 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2950 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2951 }
2952
2953 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002954load_error1:
2955 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002956 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002957
Ariel Elior889b9af2012-01-26 06:01:51 +00002958 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002959 if (IS_PF(bp))
2960 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002961load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002962 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002963 bnx2x_free_fp_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002964 bnx2x_free_mem(bp);
2965
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002966 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002967#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002968}
2969
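/* Wait until the Tx fastpath tasks of every queue (all CoS rings) have
 * completed; returns a non-zero value if a queue fails to drain.
 */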
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002970int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002971{
2972 u8 rc = 0, cos, i;
2973
2974 /* Wait until tx fastpath tasks complete */
2975 for_each_tx_queue(bp, i) {
2976 struct bnx2x_fastpath *fp = &bp->fp[i];
2977
2978 for_each_cos_in_tx_queue(fp, cos)
2979 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2980 if (rc)
2981 return rc;
2982 }
2983 return 0;
2984}
2985
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002986/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002987int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002988{
2989 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002990 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002991
Merav Sicron55c11942012-11-07 00:45:48 +00002992 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2993
Yuval Mintz230d00e2015-07-22 09:16:25 +03002994 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2995 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2996
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002997 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002998 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002999 u32 val;
3000 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
3001 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
3002 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
3003 }
3004
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00003005 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00003006 (bp->state == BNX2X_STATE_CLOSED ||
3007 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003008 /* We can get here if the driver has been unloaded
3009 * during parity error recovery and is either waiting for a
3010 * leader to complete or for other functions to unload and
3011 * then ifdown has been issued. In this case we want to
 3012		 * unload and let other functions complete a recovery
3013 * process.
3014 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003015 bp->recovery_state = BNX2X_RECOVERY_DONE;
3016 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003017 bnx2x_release_leader_lock(bp);
3018 smp_mb();
3019
Merav Sicron51c1a582012-03-18 10:33:38 +00003020 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3021 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003022 return -EINVAL;
3023 }
3024
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00003025	/* Nothing to do during unload if the previous bnx2x_nic_load()
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003026	 * has not completed successfully - all resources are released.
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00003027	 *
 3028	 * We can get here only after an unsuccessful ndo_* callback, during
 3029	 * which the dev->IFF_UP flag is still on.
3030 */
3031 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3032 return 0;
3033
 3034	/* It's important to set bp->state to a value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07003035 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3036 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3037 */
3038 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3039 smp_mb();
3040
Ariel Elior78c3bcc2013-06-20 17:39:08 +03003041 /* indicate to VFs that the PF is going down */
3042 bnx2x_iov_channel_down(bp);
3043
Merav Sicron55c11942012-11-07 00:45:48 +00003044 if (CNIC_LOADED(bp))
3045 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3046
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00003047 /* Stop Tx */
3048 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003049 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00003050
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003051 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003052
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003053 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003054
Ariel Eliorad5afc82013-01-01 05:22:26 +00003055 if (IS_PF(bp)) {
3056 /* Set ALWAYS_ALIVE bit in shmem */
3057 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3058 bnx2x_drv_pulse(bp);
3059 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3060 bnx2x_save_statistics(bp);
3061 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003062
Ariel Eliorad5afc82013-01-01 05:22:26 +00003063 /* wait till consumers catch up with producers in all queues */
3064 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003065
Ariel Elior9b176b62013-01-01 05:22:28 +00003066 /* if VF indicate to PF this function is going down (PF will delete sp
3067 * elements and clear initializations
3068 */
3069 if (IS_VF(bp))
3070 bnx2x_vfpf_close_vf(bp);
3071 else if (unload_mode != UNLOAD_RECOVERY)
3072 /* if this is a normal/close unload need to clean up chip*/
Yuval Mintz5d07d862012-09-13 02:56:21 +00003073 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003074 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003075 /* Send the UNLOAD_REQUEST to the MCP */
3076 bnx2x_send_unload_req(bp, unload_mode);
3077
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003078		/* In case of a global attention, prevent transactions to the
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003079		 * host from the functions on the engine that doesn't reset the
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003080		 * global blocks once the global blocks are reset and the gates
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003081		 * are opened (the engine whose leader will perform the recovery
 3082		 * last).
3083 */
3084 if (!CHIP_IS_E1x(bp))
3085 bnx2x_pf_disable(bp);
3086
3087 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003088 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00003089 /* Delete all NAPI objects */
3090 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003091 if (CNIC_LOADED(bp))
3092 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003093 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003094 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003095
3096 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00003097 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003098 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003099
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003100 /*
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003101 * At this stage no more interrupts will arrive so we may safely clean
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003102 * the queueable objects here in case they failed to get cleaned so far.
3103 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003104 if (IS_PF(bp))
3105 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003106
Vladislav Zolotarov79616892011-07-21 07:58:54 +00003107 /* There should be no more pending SP commands at this stage */
3108 bp->sp_state = 0;
3109
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003110 bp->port.pmf = 0;
3111
Dmitry Kravkova0d307b2013-11-17 08:59:26 +02003112 /* clear pending work in rtnl task */
3113 bp->sp_rtnl_state = 0;
3114 smp_mb();
3115
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003116 /* Free SKBs, SGEs, TPA pool and driver internals */
3117 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003118 if (CNIC_LOADED(bp))
3119 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003120 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003121 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003122
Ariel Eliorad5afc82013-01-01 05:22:26 +00003123 bnx2x_free_fp_mem(bp);
3124 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00003125 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003126
Ariel Eliorad5afc82013-01-01 05:22:26 +00003127 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00003128 if (CNIC_LOADED(bp))
3129 bnx2x_free_mem_cnic(bp);
3130 }
Ariel Eliorb4cddbd2013-08-28 01:13:03 +03003131 bnx2x_free_mem(bp);
3132
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003133 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00003134 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003135
Yuval Mintz42f82772014-03-23 18:12:23 +02003136 /* Clear driver version indication in shmem */
3137 if (IS_PF(bp))
3138 bnx2x_update_mng_version(bp);
3139
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003140 /* Check if there are pending parity attentions. If there are - set
3141 * RECOVERY_IN_PROGRESS.
3142 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003143 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003144 bnx2x_set_reset_in_progress(bp);
3145
3146 /* Set RESET_IS_GLOBAL if needed */
3147 if (global)
3148 bnx2x_set_reset_global(bp);
3149 }
3150
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003151 /* The last driver must disable a "close the gate" if there is no
3152 * parity attention or "process kill" pending.
3153 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003154 if (IS_PF(bp) &&
3155 !bnx2x_clear_pf_load(bp) &&
3156 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003157 bnx2x_disable_close_the_gate(bp);
3158
Merav Sicron55c11942012-11-07 00:45:48 +00003159 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3160
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003161 return 0;
3162}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003163
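/* Transition the device between PCI power states (D0/D3hot), honouring WoL.
 * Silently succeeds when there is no PM capability, and leaves the power on
 * when other functions still have the device enabled.
 */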
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003164int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3165{
3166 u16 pmcsr;
3167
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003168 /* If there is no power capability, silently succeed */
Jon Mason29ed74c2013-09-11 11:22:39 -07003169 if (!bp->pdev->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003170 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003171 return 0;
3172 }
3173
Jon Mason29ed74c2013-09-11 11:22:39 -07003174 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003175
3176 switch (state) {
3177 case PCI_D0:
Jon Mason29ed74c2013-09-11 11:22:39 -07003178 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003179 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3180 PCI_PM_CTRL_PME_STATUS));
3181
3182 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3183 /* delay required during transition out of D3hot */
3184 msleep(20);
3185 break;
3186
3187 case PCI_D3hot:
 3188		/* If there are other clients above, don't
 3189		 * shut down the power */
3190 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3191 return 0;
3192 /* Don't shut down the power for emulation and FPGA */
3193 if (CHIP_REV_IS_SLOW(bp))
3194 return 0;
3195
3196 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3197 pmcsr |= 3;
3198
3199 if (bp->wol)
3200 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3201
Jon Mason29ed74c2013-09-11 11:22:39 -07003202 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003203 pmcsr);
3204
3205 /* No more memory access after this point until
3206 * device is brought back to D0.
3207 */
3208 break;
3209
3210 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003211 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003212 return -EINVAL;
3213 }
3214 return 0;
3215}
3216
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003217/*
3218 * net_device service functions
3219 */
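/* NAPI poll handler: service Tx completions on every CoS ring and Rx work up
 * to the budget, then re-arm the status block interrupt once no further work
 * is pending.
 */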
stephen hemmingera8f47eb2014-01-09 22:20:11 -08003220static int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003221{
3222 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003223 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003224 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3225 napi);
3226 struct bnx2x *bp = fp->bp;
3227
3228 while (1) {
3229#ifdef BNX2X_STOP_ON_ERROR
3230 if (unlikely(bp->panic)) {
3231 napi_complete(napi);
3232 return 0;
3233 }
3234#endif
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003235 if (!bnx2x_fp_lock_napi(fp))
Govindarajulu Varadarajan24e579c2015-01-25 16:09:23 +05303236 return budget;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003237
Ariel Elior6383c0b2011-07-14 08:31:57 +00003238 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003239 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3240 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003241
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003242 if (bnx2x_has_rx_work(fp)) {
3243 work_done += bnx2x_rx_int(fp, budget - work_done);
3244
3245 /* must not complete if we consumed full budget */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003246 if (work_done >= budget) {
3247 bnx2x_fp_unlock_napi(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003248 break;
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003249 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003250 }
3251
Eric Dumazet074975d2015-04-14 18:45:00 -07003252 bnx2x_fp_unlock_napi(fp);
3253
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003254 /* Fall out from the NAPI loop if needed */
Eric Dumazet074975d2015-04-14 18:45:00 -07003255 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003256
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003257 /* No need to update SB for FCoE L2 ring as long as
3258 * it's connected to the default SB and the SB
3259 * has been updated when NAPI was scheduled.
3260 */
3261 if (IS_FCOE_FP(fp)) {
3262 napi_complete(napi);
3263 break;
3264 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003265 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003266 /* bnx2x_has_rx_work() reads the status block,
3267 * thus we need to ensure that status block indices
3268 * have been actually read (bnx2x_update_fpsb_idx)
3269 * prior to this check (bnx2x_has_rx_work) so that
3270 * we won't write the "newer" value of the status block
3271 * to IGU (if there was a DMA right after
3272 * bnx2x_has_rx_work and if there is no rmb, the memory
3273 * reading (bnx2x_update_fpsb_idx) may be postponed
3274 * to right before bnx2x_ack_sb). In this case there
3275 * will never be another interrupt until there is
3276 * another update of the status block, while there
3277 * is still unhandled work.
3278 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003279 rmb();
3280
3281 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3282 napi_complete(napi);
3283 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003284 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003285 "Update index to %d\n", fp->fp_hc_idx);
3286 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3287 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003288 IGU_INT_ENABLE, 1);
3289 break;
3290 }
3291 }
3292 }
3293
3294 return work_done;
3295}
3296
Cong Wange0d10952013-08-01 11:10:25 +08003297#ifdef CONFIG_NET_RX_BUSY_POLL
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003298/* must be called with local_bh_disable()d */
3299int bnx2x_low_latency_recv(struct napi_struct *napi)
3300{
3301 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3302 napi);
3303 struct bnx2x *bp = fp->bp;
3304 int found = 0;
3305
3306 if ((bp->state == BNX2X_STATE_CLOSED) ||
3307 (bp->state == BNX2X_STATE_ERROR) ||
Michal Schmidtf8dcb5e2015-04-28 11:34:23 +02003308 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003309 return LL_FLUSH_FAILED;
3310
3311 if (!bnx2x_fp_lock_poll(fp))
3312 return LL_FLUSH_BUSY;
3313
Dmitry Kravkov75b29452013-06-19 01:36:05 +03003314 if (bnx2x_has_rx_work(fp))
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003315 found = bnx2x_rx_int(fp, 4);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003316
3317 bnx2x_fp_unlock_poll(fp);
3318
3319 return found;
3320}
3321#endif
3322
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003323/* We split the first BD into header and data BDs
 3324 * to ease the pain of our fellow microcode engineers;
 3325 * we use one mapping for both BDs.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003326 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003327static u16 bnx2x_tx_split(struct bnx2x *bp,
3328 struct bnx2x_fp_txdata *txdata,
3329 struct sw_tx_bd *tx_buf,
3330 struct eth_tx_start_bd **tx_bd, u16 hlen,
3331 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003332{
3333 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3334 struct eth_tx_bd *d_tx_bd;
3335 dma_addr_t mapping;
3336 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3337
3338 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003339 h_tx_bd->nbytes = cpu_to_le16(hlen);
3340
Dmitry Kravkov91226792013-03-11 05:17:52 +00003341 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3342 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003343
3344 /* now get a new data BD
3345 * (after the pbd) and fill it */
3346 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003347 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003348
3349 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3350 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3351
3352 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3353 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3354 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3355
3356 /* this marks the BD as one that has no individual mapping */
3357 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3358
3359 DP(NETIF_MSG_TX_QUEUED,
3360 "TSO split data size is %d (%x:%x)\n",
3361 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3362
3363 /* update tx_bd */
3364 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3365
3366 return bd_prod;
3367}
3368
Yuval Mintz86564c32013-01-23 03:21:50 +00003369#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3370#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003371static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003372{
Yuval Mintz86564c32013-01-23 03:21:50 +00003373 __sum16 tsum = (__force __sum16) csum;
3374
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003375 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003376 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3377 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003378
3379 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003380 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3381 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003382
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003383 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003384}
3385
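/* Classify an outgoing skb: returns a bitmask of XMIT_* flags describing the
 * required checksum offload (IPv4/IPv6, TCP/UDP), tunnel encapsulation and
 * GSO handling.
 */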
Dmitry Kravkov91226792013-03-11 05:17:52 +00003386static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003387{
3388 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003389 __u8 prot = 0;
3390 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003391
3392 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003393 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003394
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003395 protocol = vlan_get_protocol(skb);
3396 if (protocol == htons(ETH_P_IPV6)) {
3397 rc = XMIT_CSUM_V6;
3398 prot = ipv6_hdr(skb)->nexthdr;
3399 } else {
3400 rc = XMIT_CSUM_V4;
3401 prot = ip_hdr(skb)->protocol;
3402 }
3403
3404 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3405 if (inner_ip_hdr(skb)->version == 6) {
3406 rc |= XMIT_CSUM_ENC_V6;
3407 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003408 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003409 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003410 rc |= XMIT_CSUM_ENC_V4;
3411 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003412 rc |= XMIT_CSUM_TCP;
3413 }
3414 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003415 if (prot == IPPROTO_TCP)
3416 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003417
Eric Dumazet36a8f392013-09-29 01:21:32 -07003418 if (skb_is_gso(skb)) {
3419 if (skb_is_gso_v6(skb)) {
3420 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3421 if (rc & XMIT_CSUM_ENC)
3422 rc |= XMIT_GSO_ENC_V6;
3423 } else {
3424 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3425 if (rc & XMIT_CSUM_ENC)
3426 rc |= XMIT_GSO_ENC_V4;
3427 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003428 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003429
3430 return rc;
3431}
3432
3433#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 3434/* Check if the packet requires linearization (packet is too fragmented).
 3435 * No need to check fragmentation if the page size > 8K (there will be
 3436 * no violation of FW restrictions). */
3437static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3438 u32 xmit_type)
3439{
3440 int to_copy = 0;
3441 int hlen = 0;
3442 int first_bd_sz = 0;
3443
3444 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3445 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3446
3447 if (xmit_type & XMIT_GSO) {
3448 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3449 /* Check if LSO packet needs to be copied:
3450 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3451 int wnd_size = MAX_FETCH_BD - 3;
3452 /* Number of windows to check */
3453 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3454 int wnd_idx = 0;
3455 int frag_idx = 0;
3456 u32 wnd_sum = 0;
3457
3458 /* Headers length */
Yuval Mintz592b9b82015-06-25 15:19:29 +03003459 if (xmit_type & XMIT_GSO_ENC)
3460 hlen = (int)(skb_inner_transport_header(skb) -
3461 skb->data) +
3462 inner_tcp_hdrlen(skb);
3463 else
3464 hlen = (int)(skb_transport_header(skb) -
3465 skb->data) + tcp_hdrlen(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003466
3467 /* Amount of data (w/o headers) on linear part of SKB*/
3468 first_bd_sz = skb_headlen(skb) - hlen;
3469
3470 wnd_sum = first_bd_sz;
3471
3472 /* Calculate the first sum - it's special */
3473 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3474 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003475 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003476
3477 /* If there was data on linear skb data - check it */
3478 if (first_bd_sz > 0) {
3479 if (unlikely(wnd_sum < lso_mss)) {
3480 to_copy = 1;
3481 goto exit_lbl;
3482 }
3483
3484 wnd_sum -= first_bd_sz;
3485 }
3486
3487 /* Others are easier: run through the frag list and
3488 check all windows */
3489 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3490 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003491 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003492
3493 if (unlikely(wnd_sum < lso_mss)) {
3494 to_copy = 1;
3495 break;
3496 }
3497 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003498 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003499 }
3500 } else {
3501 /* in non-LSO too fragmented packet should always
3502 be linearized */
3503 to_copy = 1;
3504 }
3505 }
3506
3507exit_lbl:
3508 if (unlikely(to_copy))
3509 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003510 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003511 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3512 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3513
3514 return to_copy;
3515}
3516#endif
3517
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003518/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003519 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003520 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003521 * @skb: packet skb
3522 * @pbd: parse BD
3523 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003524 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003525static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3526 struct eth_tx_parse_bd_e1x *pbd,
3527 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003528{
3529 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003530 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003531 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003532
3533 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003534 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003535 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003536 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3537 ip_hdr(skb)->daddr,
3538 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003539 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003540 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003541 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3542 &ipv6_hdr(skb)->daddr,
3543 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003544 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003545
Yuval Mintz86564c32013-01-23 03:21:50 +00003546 pbd->global_data |=
3547 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003548}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003549
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003550/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003551 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3552 *
3553 * @bp: driver handle
3554 * @skb: packet skb
3555 * @parsing_data: data to be updated
3556 * @xmit_type: xmit flags
3557 *
3558 * 57712/578xx related, when skb has encapsulation
3559 */
3560static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3561 u32 *parsing_data, u32 xmit_type)
3562{
3563 *parsing_data |=
3564 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3565 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3566 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3567
3568 if (xmit_type & XMIT_CSUM_TCP) {
3569 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3570 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3571 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3572
3573 return skb_inner_transport_header(skb) +
3574 inner_tcp_hdrlen(skb) - skb->data;
3575 }
3576
3577 /* We support checksum offload for TCP and UDP only.
3578 * No need to pass the UDP header length - it's a constant.
3579 */
3580 return skb_inner_transport_header(skb) +
3581 sizeof(struct udphdr) - skb->data;
3582}
3583
3584/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003585 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003586 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003587 * @bp: driver handle
3588 * @skb: packet skb
3589 * @parsing_data: data to be updated
3590 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003591 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003592 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003593 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003594static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3595 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003596{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003597 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003598 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003599 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3600 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003601
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003602 if (xmit_type & XMIT_CSUM_TCP) {
3603 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3604 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3605 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003606
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003607 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003608 }
3609 /* We support checksum offload for TCP and UDP only.
3610 * No need to pass the UDP header length - it's a constant.
3611 */
3612 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003613}
3614
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003615/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003616static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3617 struct eth_tx_start_bd *tx_start_bd,
3618 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003619{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003620 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3621
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003622 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003623 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003624
3625 if (!(xmit_type & XMIT_CSUM_TCP))
3626 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003627}
3628
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003629/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003630 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003631 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003632 * @bp: driver handle
3633 * @skb: packet skb
3634 * @pbd: parse BD to be updated
3635 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003636 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003637static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3638 struct eth_tx_parse_bd_e1x *pbd,
3639 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003640{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003641 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003642
3643 /* for now NS flag is not used in Linux */
3644 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003645 cpu_to_le16(hlen |
3646 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3647 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003648
3649 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003650 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003651
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003652 hlen += pbd->ip_hlen_w;
3653
3654 /* We support checksum offload for TCP and UDP only */
3655 if (xmit_type & XMIT_CSUM_TCP)
3656 hlen += tcp_hdrlen(skb) / 2;
3657 else
3658 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003659
3660 pbd->total_hlen_w = cpu_to_le16(hlen);
3661 hlen = hlen*2;
3662
3663 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003664 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003665
3666 } else {
3667 s8 fix = SKB_CS_OFF(skb); /* signed! */
3668
3669 DP(NETIF_MSG_TX_QUEUED,
3670 "hlen %d fix %d csum before fix %x\n",
3671 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3672
3673 /* HW bug: fixup the CSUM */
3674 pbd->tcp_pseudo_csum =
3675 bnx2x_csum_fix(skb_transport_header(skb),
3676 SKB_CS(skb), fix);
3677
3678 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3679 pbd->tcp_pseudo_csum);
3680 }
3681
3682 return hlen;
3683}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003684
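/* Fill the 57712/578xx parsing BDs for tunneled GSO packets: outer/inner
 * header lengths, the outer IP checksum (or the IPv6 outer indication), the
 * inner TCP pseudo checksum and the tunnel-over-UDP indication.
 */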
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003685static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3686 struct eth_tx_parse_bd_e2 *pbd_e2,
3687 struct eth_tx_parse_2nd_bd *pbd2,
3688 u16 *global_data,
3689 u32 xmit_type)
3690{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003691 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003692 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003693
Dmitry Kravkove287a752013-03-21 15:38:24 +00003694 /* from outer IP to transport */
3695 hlen_w = (skb_inner_transport_header(skb) -
3696 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003697
3698 /* transport len */
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003699 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003700
Dmitry Kravkove287a752013-03-21 15:38:24 +00003701 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003702
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003703 /* outer IP header info */
3704 if (xmit_type & XMIT_CSUM_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003705 struct iphdr *iph = ip_hdr(skb);
Dmitry Kravkov1b4fc0e2013-07-11 15:48:21 +03003706 u32 csum = (__force u32)(~iph->check) -
3707 (__force u32)iph->tot_len -
3708 (__force u32)iph->frag_off;
Yuval Mintzc957d092013-06-25 08:50:11 +03003709
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003710 outerip_len = iph->ihl << 1;
3711
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003712 pbd2->fw_ip_csum_wo_len_flags_frag =
Yuval Mintzc957d092013-06-25 08:50:11 +03003713 bswab16(csum_fold((__force __wsum)csum));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003714 } else {
3715 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003716 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003717 pbd_e2->data.tunnel_data.flags |=
Yuval Mintz28311f82015-07-22 09:16:22 +03003718 ETH_TUNNEL_DATA_IPV6_OUTER;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003719 }
3720
3721 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3722
3723 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3724
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003725 /* inner IP header info */
3726 if (xmit_type & XMIT_CSUM_ENC_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003727 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003728
3729 pbd_e2->data.tunnel_data.pseudo_csum =
3730 bswab16(~csum_tcpudp_magic(
3731 inner_ip_hdr(skb)->saddr,
3732 inner_ip_hdr(skb)->daddr,
3733 0, IPPROTO_TCP, 0));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003734 } else {
3735 pbd_e2->data.tunnel_data.pseudo_csum =
3736 bswab16(~csum_ipv6_magic(
3737 &inner_ipv6_hdr(skb)->saddr,
3738 &inner_ipv6_hdr(skb)->daddr,
3739 0, IPPROTO_TCP, 0));
3740 }
3741
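	/* outer IP header offset from the start of the frame, in 16-bit words;
	 * packed below into the 2nd parsing BD's global data together with the
	 * outer IP header length and the LLC/SNAP indication for 802.1Q frames
	 */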
3742 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3743
3744 *global_data |=
3745 outerip_off |
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003746 (outerip_len <<
3747 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3748 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3749 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003750
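	/* UDP-based tunnel: flag it and tell FW where the outer UDP
	 * header starts, in 16-bit words
	 */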
3751 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3752 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3753 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3754 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003755}
3756
Dmitry Kravkove42780b2014-08-17 16:47:43 +03003757static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3758 u32 xmit_type)
3759{
3760 struct ipv6hdr *ipv6;
3761
3762 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3763 return;
3764
3765 if (xmit_type & XMIT_GSO_ENC_V6)
3766 ipv6 = inner_ipv6_hdr(skb);
3767 else /* XMIT_GSO_V6 */
3768 ipv6 = ipv6_hdr(skb);
3769
3770 if (ipv6->nexthdr == NEXTHDR_IPV6)
3771 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3772}
3773
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003774/* called with netif_tx_lock
3775 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3776 * netif_wake_queue()
3777 */
3778netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3779{
3780 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003781
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003782 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003783 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003784 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003785 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003786 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003787 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003788 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003789 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003790 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003791 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003792 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003793 dma_addr_t mapping;
3794 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3795 int i;
3796 u8 hlen = 0;
3797 __le16 pkt_size = 0;
3798 struct ethhdr *eth;
3799 u8 mac_type = UNICAST_ADDRESS;
3800
3801#ifdef BNX2X_STOP_ON_ERROR
3802 if (unlikely(bp->panic))
3803 return NETDEV_TX_BUSY;
3804#endif
3805
Ariel Elior6383c0b2011-07-14 08:31:57 +00003806 txq_index = skb_get_queue_mapping(skb);
3807 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003808
Merav Sicron55c11942012-11-07 00:45:48 +00003809 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003810
Merav Sicron65565882012-06-19 07:48:26 +00003811 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003812
3813 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003814 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003815 txq_index, fp_index, txdata_index); */
3816
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003817 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003818 DP(NETIF_MSG_TX_QUEUED,
3819 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003820 txdata->cid, fp_index, txdata_index, txdata, fp); */
3821
3822 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003823 skb_shinfo(skb)->nr_frags +
3824 BDS_PER_TX_PKT +
3825 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003826 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003827 if (txdata->tx_ring_size == 0) {
3828 struct bnx2x_eth_q_stats *q_stats =
3829 bnx2x_fp_qstats(bp, txdata->parent_fp);
3830 q_stats->driver_filtered_tx_pkt++;
3831 dev_kfree_skb(skb);
3832 return NETDEV_TX_OK;
3833 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003834 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3835 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003836 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003837
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003838 return NETDEV_TX_BUSY;
3839 }
3840
Merav Sicron51c1a582012-03-18 10:33:38 +00003841 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003842 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003843 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003844 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3845 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003846
3847 eth = (struct ethhdr *)skb->data;
3848
 3849	/* set flag according to packet type (UNICAST_ADDRESS is default) */
3850 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3851 if (is_broadcast_ether_addr(eth->h_dest))
3852 mac_type = BROADCAST_ADDRESS;
3853 else
3854 mac_type = MULTICAST_ADDRESS;
3855 }
3856
Dmitry Kravkov91226792013-03-11 05:17:52 +00003857#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003858 /* First, check if we need to linearize the skb (due to FW
3859 restrictions). No need to check fragmentation if page size > 8K
 3860	   (there will be no violation of FW restrictions) */
3861 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3862 /* Statistics of linearization */
3863 bp->lin_cnt++;
3864 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003865 DP(NETIF_MSG_TX_QUEUED,
3866 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003867 dev_kfree_skb_any(skb);
3868 return NETDEV_TX_OK;
3869 }
3870 }
3871#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003872 /* Map skb linear data for DMA */
3873 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3874 skb_headlen(skb), DMA_TO_DEVICE);
3875 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003876 DP(NETIF_MSG_TX_QUEUED,
3877 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003878 dev_kfree_skb_any(skb);
3879 return NETDEV_TX_OK;
3880 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003881 /*
3882 Please read carefully. First we use one BD which we mark as start,
3883 then we have a parsing info BD (used for TSO or xsum),
3884 and only then we have the rest of the TSO BDs.
3885 (don't forget to mark the last one as last,
3886 and to unmap only AFTER you write to the BD ...)
 3887	And above all, all pbd sizes are in words - NOT DWORDS!
3888 */
3889
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003890 /* get current pkt produced now - advance it just before sending packet
3891 * since mapping of pages may fail and cause packet to be dropped
3892 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003893 pkt_prod = txdata->tx_pkt_prod;
3894 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003895
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003896 /* get a tx_buf and first BD
3897 * tx_start_bd may be changed during SPLIT,
3898 * but first_bd will always stay first
3899 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003900 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3901 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003902 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003903
3904 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003905
Michal Kalderoneeed0182014-08-17 16:47:44 +03003906 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3907 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3908 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3909 } else if (bp->ptp_tx_skb) {
3910 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3911 } else {
3912 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3913 /* schedule check for Tx timestamp */
3914 bp->ptp_tx_skb = skb_get(skb);
3915 bp->ptp_tx_start = jiffies;
3916 schedule_work(&bp->ptp_task);
3917 }
3918 }
3919
Dmitry Kravkov91226792013-03-11 05:17:52 +00003920 /* header nbd: indirectly zero other flags! */
3921 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003922
3923 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003924 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003925 tx_buf->skb = skb;
3926 tx_buf->flags = 0;
3927
3928 DP(NETIF_MSG_TX_QUEUED,
3929 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003930 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003931
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003932 if (skb_vlan_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003933 tx_start_bd->vlan_or_ethertype =
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003934 cpu_to_le16(skb_vlan_tag_get(skb));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003935 tx_start_bd->bd_flags.as_bitfield |=
3936 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003937 } else {
3938 /* when transmitting in a vf, start bd must hold the ethertype
3939 * for fw to enforce it
3940 */
Yuval Mintzea36475a2014-08-25 17:48:30 +03003941#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov91226792013-03-11 05:17:52 +00003942 if (IS_VF(bp))
Yuval Mintzea36475a2014-08-25 17:48:30 +03003943#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003944 tx_start_bd->vlan_or_ethertype =
3945 cpu_to_le16(ntohs(eth->h_proto));
Yuval Mintzea36475a2014-08-25 17:48:30 +03003946#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov91226792013-03-11 05:17:52 +00003947 else
Ariel Eliordc1ba592013-01-01 05:22:30 +00003948 /* used by FW for packet accounting */
3949 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Yuval Mintzea36475a2014-08-25 17:48:30 +03003950#endif
Ariel Eliordc1ba592013-01-01 05:22:30 +00003951 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003952
Dmitry Kravkov91226792013-03-11 05:17:52 +00003953 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3954
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003955 /* turn on parsing and get a BD */
3956 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003957
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003958 if (xmit_type & XMIT_CSUM)
3959 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003960
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003961 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003962 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003963 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003964
3965 if (xmit_type & XMIT_CSUM_ENC) {
3966 u16 global_data = 0;
3967
3968 /* Set PBD in enc checksum offload case */
3969 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3970 &pbd_e2_parsing_data,
3971 xmit_type);
3972
3973 /* turn on 2nd parsing and get a BD */
3974 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3975
3976 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3977
3978 memset(pbd2, 0, sizeof(*pbd2));
3979
3980 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3981 (skb_inner_network_header(skb) -
3982 skb->data) >> 1;
3983
3984 if (xmit_type & XMIT_GSO_ENC)
3985 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3986 &global_data,
3987 xmit_type);
3988
3989 pbd2->global_data = cpu_to_le16(global_data);
3990
 3991			/* add additional parsing BD indication to the start BD */
3992 SET_FLAG(tx_start_bd->general_data,
3993 ETH_TX_START_BD_PARSE_NBDS, 1);
3994 /* set encapsulation flag in start BD */
3995 SET_FLAG(tx_start_bd->general_data,
3996 ETH_TX_START_BD_TUNNEL_EXIST, 1);
Dmitry Kravkovfe26566d2014-07-24 18:54:47 +03003997
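			/* remember that this packet consumed a second parsing BD */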
3998 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3999
Dmitry Kravkova848ade2013-03-18 06:51:03 +00004000 nbd++;
4001 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00004002 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00004003 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
4004 &pbd_e2_parsing_data,
4005 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00004006 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00004007
Dmitry Kravkove42780b2014-08-17 16:47:43 +03004008 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
Yuval Mintzbabe7232014-02-27 15:42:26 +02004009 /* Add the macs to the parsing BD if this is a vf or if
4010 * Tx Switching is enabled.
4011 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00004012 if (IS_VF(bp)) {
4013 /* override GRE parameters in BD */
4014 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4015 &pbd_e2->data.mac_addr.src_mid,
4016 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004017 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00004018
4019 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4020 &pbd_e2->data.mac_addr.dst_mid,
4021 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004022 eth->h_dest);
Yuval Mintzea36475a2014-08-25 17:48:30 +03004023 } else {
4024 if (bp->flags & TX_SWITCHING)
4025 bnx2x_set_fw_mac_addr(
4026 &pbd_e2->data.mac_addr.dst_hi,
4027 &pbd_e2->data.mac_addr.dst_mid,
4028 &pbd_e2->data.mac_addr.dst_lo,
4029 eth->h_dest);
4030#ifdef BNX2X_STOP_ON_ERROR
4031 /* Enforce security is always set in Stop on Error -
4032 * source mac should be present in the parsing BD
4033 */
4034 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4035 &pbd_e2->data.mac_addr.src_mid,
4036 &pbd_e2->data.mac_addr.src_lo,
4037 eth->h_source);
4038#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004039 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004040
4041 SET_FLAG(pbd_e2_parsing_data,
4042 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004043 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004044 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004045 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004046 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4047 /* Set PBD in checksum offload case */
4048 if (xmit_type & XMIT_CSUM)
4049 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004050
Yuval Mintz96bed4b2012-10-01 03:46:19 +00004051 SET_FLAG(global_data,
4052 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4053 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004054 }
4055
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004056 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004057 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4058 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004059 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4060 pkt_size = tx_start_bd->nbytes;
4061
Merav Sicron51c1a582012-03-18 10:33:38 +00004062 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004063 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004064 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004065 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004066 tx_start_bd->bd_flags.as_bitfield,
4067 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004068
4069 if (xmit_type & XMIT_GSO) {
4070
4071 DP(NETIF_MSG_TX_QUEUED,
4072 "TSO packet len %d hlen %d total len %d tso size %d\n",
4073 skb->len, hlen, skb_headlen(skb),
4074 skb_shinfo(skb)->gso_size);
4075
4076 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4077
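		/* linear data extends past the headers; split the start BD at
		 * hlen so headers and payload sit in separate BDs, consuming
		 * one extra BD (hence nbd++)
		 */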
Dmitry Kravkov91226792013-03-11 05:17:52 +00004078 if (unlikely(skb_headlen(skb) > hlen)) {
4079 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004080 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4081 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00004082 bd_prod);
4083 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004084 if (!CHIP_IS_E1x(bp))
Dmitry Kravkove42780b2014-08-17 16:47:43 +03004085 pbd_e2_parsing_data |=
4086 (skb_shinfo(skb)->gso_size <<
4087 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4088 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004089 else
Dmitry Kravkove42780b2014-08-17 16:47:43 +03004090 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004091 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00004092
4093 /* Set the PBD's parsing_data field if not zero
4094 * (for the chips newer than 57711).
4095 */
4096 if (pbd_e2_parsing_data)
4097 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4098
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004099 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4100
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004101 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004102 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4103 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4104
Eric Dumazet9e903e02011-10-18 21:00:24 +00004105 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4106 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004107 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00004108 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004109
Merav Sicron51c1a582012-03-18 10:33:38 +00004110 DP(NETIF_MSG_TX_QUEUED,
4111 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004112
 4113			/* we need to unmap all buffers already mapped
4114 * for this SKB;
4115 * first_bd->nbd need to be properly updated
4116 * before call to bnx2x_free_tx_pkt
4117 */
4118 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004119 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00004120 TX_BD(txdata->tx_pkt_prod),
4121 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004122 return NETDEV_TX_OK;
4123 }
4124
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004125 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00004126 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004127 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00004128 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004129
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004130 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4131 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00004132 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4133 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004134 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004135
4136 DP(NETIF_MSG_TX_QUEUED,
4137 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4138 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4139 le16_to_cpu(tx_data_bd->nbytes));
4140 }
4141
4142 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4143
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004144 /* update with actual num BDs */
4145 first_bd->nbd = cpu_to_le16(nbd);
4146
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004147 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4148
4149 /* now send a tx doorbell, counting the next BD
4150 * if the packet contains or ends with it
4151 */
4152 if (TX_BD_POFF(bd_prod) < nbd)
4153 nbd++;
4154
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004155 /* total_pkt_bytes should be set on the first data BD if
4156 * it's not an LSO packet and there is more than one
4157 * data BD. In this case pkt_size is limited by an MTU value.
4158 * However we prefer to set it for an LSO packet (while we don't
 4159	 * have to) in order to save some CPU cycles in a non-LSO
 4160	 * case, where we care much more about them.
4161 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004162 if (total_pkt_bd != NULL)
4163 total_pkt_bd->total_pkt_bytes = pkt_size;
4164
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004165 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004166 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00004167 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004168 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4169 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4170 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4171 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004172 if (pbd_e2)
4173 DP(NETIF_MSG_TX_QUEUED,
4174 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
Dmitry Kravkov91226792013-03-11 05:17:52 +00004175 pbd_e2,
4176 pbd_e2->data.mac_addr.dst_hi,
4177 pbd_e2->data.mac_addr.dst_mid,
4178 pbd_e2->data.mac_addr.dst_lo,
4179 pbd_e2->data.mac_addr.src_hi,
4180 pbd_e2->data.mac_addr.src_mid,
4181 pbd_e2->data.mac_addr.src_lo,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004182 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004183 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4184
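	/* BQL accounting for the bytes handed to this Tx queue */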
Tom Herbert2df1a702011-11-28 16:33:37 +00004185 netdev_tx_sent_queue(txq, skb->len);
4186
Willem de Bruijn8373c572012-04-27 09:04:06 +00004187 skb_tx_timestamp(skb);
4188
Ariel Elior6383c0b2011-07-14 08:31:57 +00004189 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004190 /*
4191 * Make sure that the BD data is updated before updating the producer
4192 * since FW might read the BD right after the producer is updated.
4193 * This is only applicable for weak-ordered memory model archs such
4194 * as IA-64. The following barrier is also mandatory since FW will
 4195	 * assume packets must have BDs.
4196 */
4197 wmb();
4198
Ariel Elior6383c0b2011-07-14 08:31:57 +00004199 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004200 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004201
Ariel Elior6383c0b2011-07-14 08:31:57 +00004202 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004203
4204 mmiowb();
4205
Ariel Elior6383c0b2011-07-14 08:31:57 +00004206 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004207
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004208 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004209 netif_tx_stop_queue(txq);
4210
4211 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4212 * ordering of set_bit() in netif_tx_stop_queue() and read of
 4213		 * txdata->tx_bd_cons */
4214 smp_mb();
4215
Barak Witkowski15192a82012-06-19 07:48:28 +00004216 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004217 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004218 netif_tx_wake_queue(txq);
4219 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00004220 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004221
4222 return NETDEV_TX_OK;
4223}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004224
Yuval Mintz230d00e2015-07-22 09:16:25 +03004225void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4226{
4227 int mfw_vn = BP_FW_MB_IDX(bp);
4228 u32 tmp;
4229
 4230	/* If the shmem shouldn't affect the configuration, use a 1:1 mapping */
4231 if (!IS_MF_BD(bp)) {
4232 int i;
4233
4234 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4235 c2s_map[i] = i;
4236 *c2s_default = 0;
4237
4238 return;
4239 }
4240
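	/* each 32-bit shmem word packs the mapped value for four priorities,
	 * one byte per priority (lower word: prio 0-3, upper word: prio 4-7)
	 */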
4241 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4242 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4243 c2s_map[0] = tmp & 0xff;
4244 c2s_map[1] = (tmp >> 8) & 0xff;
4245 c2s_map[2] = (tmp >> 16) & 0xff;
4246 c2s_map[3] = (tmp >> 24) & 0xff;
4247
4248 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4249 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4250 c2s_map[4] = tmp & 0xff;
4251 c2s_map[5] = (tmp >> 8) & 0xff;
4252 c2s_map[6] = (tmp >> 16) & 0xff;
4253 c2s_map[7] = (tmp >> 24) & 0xff;
4254
4255 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4256 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4257 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4258}
4259
Ariel Elior6383c0b2011-07-14 08:31:57 +00004260/**
4261 * bnx2x_setup_tc - routine to configure net_device for multi tc
4262 *
 4263 * @dev: net device to configure
 4264 * @num_tc: number of traffic classes to enable
4265 *
4266 * callback connected to the ndo_setup_tc function pointer
4267 */
4268int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4269{
Ariel Elior6383c0b2011-07-14 08:31:57 +00004270 struct bnx2x *bp = netdev_priv(dev);
Yuval Mintz230d00e2015-07-22 09:16:25 +03004271 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4272 int cos, prio, count, offset;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004273
4274 /* setup tc must be called under rtnl lock */
4275 ASSERT_RTNL();
4276
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004277 /* no traffic classes requested. Aborting */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004278 if (!num_tc) {
4279 netdev_reset_tc(dev);
4280 return 0;
4281 }
4282
4283 /* requested to support too many traffic classes */
4284 if (num_tc > bp->max_cos) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00004285 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004286 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004287 return -EINVAL;
4288 }
4289
 4290	/* declare the number of supported traffic classes */
4291 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004292 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004293 return -EINVAL;
4294 }
4295
Yuval Mintz230d00e2015-07-22 09:16:25 +03004296 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4297
Ariel Elior6383c0b2011-07-14 08:31:57 +00004298 /* configure priority to traffic class mapping */
4299 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
Yuval Mintz230d00e2015-07-22 09:16:25 +03004300 int outer_prio = c2s_map[prio];
4301
4302 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00004303 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4304 "mapping priority %d to tc %d\n",
Yuval Mintz230d00e2015-07-22 09:16:25 +03004305 outer_prio, bp->prio_to_cos[outer_prio]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004306 }
4307
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004308 /* Use this configuration to differentiate tc0 from other COSes
Ariel Elior6383c0b2011-07-14 08:31:57 +00004309 This can be used for ets or pfc, and save the effort of setting
4310 up a multio class queue disc or negotiating DCBX with a switch
4311 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00004312 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004313 for (prio = 1; prio < 16; prio++) {
4314 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00004315 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004316 } */
4317
4318 /* configure traffic class to transmission queue mapping */
4319 for (cos = 0; cos < bp->max_cos; cos++) {
4320 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00004321 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004322 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00004323 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4324 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004325 cos, offset, count);
4326 }
4327
4328 return 0;
4329}
4330
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004331/* called with rtnl_lock */
4332int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4333{
4334 struct sockaddr *addr = p;
4335 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004336 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004337
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004338 if (!is_valid_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004339 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004340 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004341 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004342
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004343 if (IS_MF_STORAGE_ONLY(bp)) {
4344 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004345 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004346 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004347
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004348 if (netif_running(dev)) {
4349 rc = bnx2x_set_eth_mac(bp, false);
4350 if (rc)
4351 return rc;
4352 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004353
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004354 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4355
4356 if (netif_running(dev))
4357 rc = bnx2x_set_eth_mac(bp, true);
4358
Yuval Mintz230d00e2015-07-22 09:16:25 +03004359 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4360 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4361
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004362 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004363}
4364
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004365static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4366{
4367 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4368 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004369 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004370
4371 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00004372
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004373 if (IS_FCOE_IDX(fp_index)) {
4374 memset(sb, 0, sizeof(union host_hc_status_block));
4375 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004376 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004377 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004378 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004379 BNX2X_PCI_FREE(sb->e2_sb,
4380 bnx2x_fp(bp, fp_index,
4381 status_blk_mapping),
4382 sizeof(struct host_hc_status_block_e2));
4383 else
4384 BNX2X_PCI_FREE(sb->e1x_sb,
4385 bnx2x_fp(bp, fp_index,
4386 status_blk_mapping),
4387 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004388 }
Merav Sicron55c11942012-11-07 00:45:48 +00004389
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004390 /* Rx */
4391 if (!skip_rx_queue(bp, fp_index)) {
4392 bnx2x_free_rx_bds(fp);
4393
4394 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4395 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4396 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4397 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4398 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4399
4400 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4401 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4402 sizeof(struct eth_fast_path_rx_cqe) *
4403 NUM_RCQ_BD);
4404
4405 /* SGE ring */
4406 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4407 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4408 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4409 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4410 }
4411
4412 /* Tx */
4413 if (!skip_tx_queue(bp, fp_index)) {
4414 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004415 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004416 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004417
Merav Sicron51c1a582012-03-18 10:33:38 +00004418 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00004419 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004420 fp_index, cos, txdata->cid);
4421
4422 BNX2X_FREE(txdata->tx_buf_ring);
4423 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4424 txdata->tx_desc_mapping,
4425 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4426 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004427 }
4428 /* end of fastpath */
4429}
4430
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004431static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004432{
4433 int i;
4434 for_each_cnic_queue(bp, i)
4435 bnx2x_free_fp_mem_at(bp, i);
4436}
4437
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004438void bnx2x_free_fp_mem(struct bnx2x *bp)
4439{
4440 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00004441 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004442 bnx2x_free_fp_mem_at(bp, i);
4443}
4444
Eric Dumazet1191cb82012-04-27 21:39:21 +00004445static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004446{
4447 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004448 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004449 bnx2x_fp(bp, index, sb_index_values) =
4450 (__le16 *)status_blk.e2_sb->sb.index_values;
4451 bnx2x_fp(bp, index, sb_running_index) =
4452 (__le16 *)status_blk.e2_sb->sb.running_index;
4453 } else {
4454 bnx2x_fp(bp, index, sb_index_values) =
4455 (__le16 *)status_blk.e1x_sb->sb.index_values;
4456 bnx2x_fp(bp, index, sb_running_index) =
4457 (__le16 *)status_blk.e1x_sb->sb.running_index;
4458 }
4459}
4460
Eric Dumazet1191cb82012-04-27 21:39:21 +00004461/* Returns the number of actually allocated BDs */
4462static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4463 int rx_ring_size)
4464{
4465 struct bnx2x *bp = fp->bp;
4466 u16 ring_prod, cqe_ring_prod;
4467 int i, failure_cnt = 0;
4468
4469 fp->rx_comp_cons = 0;
4470 cqe_ring_prod = ring_prod = 0;
4471
 4472	/* This routine is called only during fp init, so
4473 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4474 */
4475 for (i = 0; i < rx_ring_size; i++) {
Michal Schmidt996dedb2013-09-05 22:13:09 +02004476 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
Eric Dumazet1191cb82012-04-27 21:39:21 +00004477 failure_cnt++;
4478 continue;
4479 }
4480 ring_prod = NEXT_RX_IDX(ring_prod);
4481 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4482 WARN_ON(ring_prod <= (i - failure_cnt));
4483 }
4484
4485 if (failure_cnt)
4486 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4487 i - failure_cnt, fp->index);
4488
4489 fp->rx_bd_prod = ring_prod;
4490 /* Limit the CQE producer by the CQE ring size */
4491 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4492 cqe_ring_prod);
4493 fp->rx_pkt = fp->rx_calls = 0;
4494
Barak Witkowski15192a82012-06-19 07:48:28 +00004495 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004496
4497 return i - failure_cnt;
4498}
4499
4500static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4501{
4502 int i;
4503
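	/* the last CQE of each CQ page is a next-page pointer chaining to the
	 * following page (wrapping back to the first page at the end)
	 */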
4504 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4505 struct eth_rx_cqe_next_page *nextpg;
4506
4507 nextpg = (struct eth_rx_cqe_next_page *)
4508 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4509 nextpg->addr_hi =
4510 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4511 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4512 nextpg->addr_lo =
4513 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4514 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4515 }
4516}
4517
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004518static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4519{
4520 union host_hc_status_block *sb;
4521 struct bnx2x_fastpath *fp = &bp->fp[index];
4522 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004523 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004524 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004525
Dmitry Kravkov2e98ffc2014-09-17 16:24:36 +03004526 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004527 rx_ring_size = MIN_RX_SIZE_NONTPA;
4528 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004529 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004530 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4531
Yuval Mintz065f8b92012-10-03 04:22:59 +00004532 if (CHIP_IS_E3(bp)) {
4533 u32 cfg = SHMEM_RD(bp,
4534 dev_info.port_hw_config[BP_PORT(bp)].
4535 default_cfg);
4536
4537 /* Decrease ring size for 1G functions */
4538 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4539 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4540 rx_ring_size /= 10;
4541 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004542
David S. Miller8decf862011-09-22 03:23:13 -04004543 /* allocate at least number of buffers required by FW */
4544 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4545 MIN_RX_SIZE_TPA, rx_ring_size);
4546
4547 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004548 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004549 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004550
Yuval Mintz04c46732013-01-23 03:21:46 +00004551 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4552
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004553 /* Common */
4554 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004555
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004556 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004557 /* status blocks */
Joe Perchescd2b0382014-02-20 13:25:51 -08004558 if (!CHIP_IS_E1x(bp)) {
4559 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4560 sizeof(struct host_hc_status_block_e2));
4561 if (!sb->e2_sb)
4562 goto alloc_mem_err;
4563 } else {
4564 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4565 sizeof(struct host_hc_status_block_e1x));
4566 if (!sb->e1x_sb)
4567 goto alloc_mem_err;
4568 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004569 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004570
4571 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4572 * set shortcuts for it.
4573 */
4574 if (!IS_FCOE_IDX(index))
4575 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004576
4577 /* Tx */
4578 if (!skip_tx_queue(bp, index)) {
4579 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004580 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004581 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004582
Merav Sicron51c1a582012-03-18 10:33:38 +00004583 DP(NETIF_MSG_IFUP,
4584 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004585 index, cos);
4586
Joe Perchescd2b0382014-02-20 13:25:51 -08004587 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4588 sizeof(struct sw_tx_bd),
4589 GFP_KERNEL);
4590 if (!txdata->tx_buf_ring)
4591 goto alloc_mem_err;
4592 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4593 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4594 if (!txdata->tx_desc_ring)
4595 goto alloc_mem_err;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004596 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004597 }
4598
4599 /* Rx */
4600 if (!skip_rx_queue(bp, index)) {
4601 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Joe Perchescd2b0382014-02-20 13:25:51 -08004602 bnx2x_fp(bp, index, rx_buf_ring) =
4603 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4604 if (!bnx2x_fp(bp, index, rx_buf_ring))
4605 goto alloc_mem_err;
4606 bnx2x_fp(bp, index, rx_desc_ring) =
4607 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4608 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4609 if (!bnx2x_fp(bp, index, rx_desc_ring))
4610 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004611
Dmitry Kravkov75b29452013-06-19 01:36:05 +03004612 /* Seed all CQEs by 1s */
Joe Perchescd2b0382014-02-20 13:25:51 -08004613 bnx2x_fp(bp, index, rx_comp_ring) =
4614 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4615 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4616 if (!bnx2x_fp(bp, index, rx_comp_ring))
4617 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004618
4619 /* SGE ring */
Joe Perchescd2b0382014-02-20 13:25:51 -08004620 bnx2x_fp(bp, index, rx_page_ring) =
4621 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4622 GFP_KERNEL);
4623 if (!bnx2x_fp(bp, index, rx_page_ring))
4624 goto alloc_mem_err;
4625 bnx2x_fp(bp, index, rx_sge_ring) =
4626 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4627 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4628 if (!bnx2x_fp(bp, index, rx_sge_ring))
4629 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004630 /* RX BD ring */
4631 bnx2x_set_next_page_rx_bd(fp);
4632
4633 /* CQ ring */
4634 bnx2x_set_next_page_rx_cq(fp);
4635
4636 /* BDs */
4637 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4638 if (ring_size < rx_ring_size)
4639 goto alloc_mem_err;
4640 }
4641
4642 return 0;
4643
4644/* handles low memory cases */
4645alloc_mem_err:
4646 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4647 index, ring_size);
 4648	/* FW will drop all packets if the queue is not big enough;
 4649	 * in these cases we disable the queue
Ariel Elior6383c0b2011-07-14 08:31:57 +00004650 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004651 */
Michal Schmidt7e6b4d42015-04-28 11:34:22 +02004652 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004653 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004654 /* release memory allocated for this queue */
4655 bnx2x_free_fp_mem_at(bp, index);
4656 return -ENOMEM;
4657 }
4658 return 0;
4659}
4660
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004661static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004662{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004663 if (!NO_FCOE(bp))
4664 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004665 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004666 /* we will fail load process instead of mark
4667 * NO_FCOE_FLAG
4668 */
4669 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004670
4671 return 0;
4672}
4673
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004674static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004675{
4676 int i;
4677
4678 /* 1. Allocate FP for leading - fatal if error
4679 * 2. Allocate RSS - fix number of queues if error
4680 */
4681
4682 /* leading */
4683 if (bnx2x_alloc_fp_mem_at(bp, 0))
4684 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004685
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004686 /* RSS */
4687 for_each_nondefault_eth_queue(bp, i)
4688 if (bnx2x_alloc_fp_mem_at(bp, i))
4689 break;
4690
4691 /* handle memory failures */
4692 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4693 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4694
4695 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004696 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004697 if (CNIC_SUPPORT(bp))
4698 /* move non eth FPs next to last eth FP
4699 * must be done in that order
4700 * FCOE_IDX < FWD_IDX < OOO_IDX
4701 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004702
Merav Sicron55c11942012-11-07 00:45:48 +00004703		/* move FCoE fp even if NO_FCOE_FLAG is on */
4704 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4705 bp->num_ethernet_queues -= delta;
4706 bp->num_queues = bp->num_ethernet_queues +
4707 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004708 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4709 bp->num_queues + delta, bp->num_queues);
4710 }
4711
4712 return 0;
4713}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004714
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004715void bnx2x_free_mem_bp(struct bnx2x *bp)
4716{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004717 int i;
4718
4719 for (i = 0; i < bp->fp_array_size; i++)
4720 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004721 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004722 kfree(bp->sp_objs);
4723 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004724 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004725 kfree(bp->msix_table);
4726 kfree(bp->ilt);
4727}
4728
Bill Pemberton0329aba2012-12-03 09:24:24 -05004729int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004730{
4731 struct bnx2x_fastpath *fp;
4732 struct msix_entry *tbl;
4733 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004734 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004735 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004736 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004737
Ariel Elior6383c0b2011-07-14 08:31:57 +00004738 /*
 4739	 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004740 * path IGU SBs plus default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004741 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004742 msix_table_size = bp->igu_sb_cnt;
4743 if (IS_PF(bp))
4744 msix_table_size++;
4745 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004746
4747 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004748 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004749 bp->fp_array_size = fp_array_size;
4750 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004751
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004752 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004753 if (!fp)
4754 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004755 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004756 fp[i].tpa_info =
4757 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4758 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4759 if (!(fp[i].tpa_info))
4760 goto alloc_err;
4761 }
4762
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004763 bp->fp = fp;
4764
Barak Witkowski15192a82012-06-19 07:48:28 +00004765 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004766 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004767 GFP_KERNEL);
4768 if (!bp->sp_objs)
4769 goto alloc_err;
4770
4771 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004772 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004773 GFP_KERNEL);
4774 if (!bp->fp_stats)
4775 goto alloc_err;
4776
Merav Sicron65565882012-06-19 07:48:26 +00004777 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004778 txq_array_size =
4779 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4780 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4781
4782 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4783 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004784 if (!bp->bnx2x_txq)
4785 goto alloc_err;
4786
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004787 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004788 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004789 if (!tbl)
4790 goto alloc_err;
4791 bp->msix_table = tbl;
4792
4793 /* ilt */
4794 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4795 if (!ilt)
4796 goto alloc_err;
4797 bp->ilt = ilt;
4798
4799 return 0;
4800alloc_err:
4801 bnx2x_free_mem_bp(bp);
4802 return -ENOMEM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004803}
4804
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004805int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004806{
4807 struct bnx2x *bp = netdev_priv(dev);
4808
4809 if (unlikely(!netif_running(dev)))
4810 return 0;
4811
Yuval Mintz5d07d862012-09-13 02:56:21 +00004812 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004813 return bnx2x_nic_load(bp, LOAD_NORMAL);
4814}
4815
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004816int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4817{
4818 u32 sel_phy_idx = 0;
4819 if (bp->link_params.num_phys <= 1)
4820 return INT_PHY;
4821
4822 if (bp->link_vars.link_up) {
4823 sel_phy_idx = EXT_PHY1;
4824 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4825 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4826 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4827 sel_phy_idx = EXT_PHY2;
4828 } else {
4829
4830 switch (bnx2x_phy_selection(&bp->link_params)) {
4831 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4832 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4833 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4834 sel_phy_idx = EXT_PHY1;
4835 break;
4836 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4837 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4838 sel_phy_idx = EXT_PHY2;
4839 break;
4840 }
4841 }
4842
4843 return sel_phy_idx;
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004844}
4845int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4846{
4847 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4848 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004849	 * The selected active PHY index always refers to the post-swap order (in case PHY
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004850 * swapping is enabled). So when swapping is enabled, we need to reverse
4851 * the configuration
4852 */
4853
4854 if (bp->link_params.multi_phy_config &
4855 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4856 if (sel_phy_idx == EXT_PHY1)
4857 sel_phy_idx = EXT_PHY2;
4858 else if (sel_phy_idx == EXT_PHY2)
4859 sel_phy_idx = EXT_PHY1;
4860 }
4861 return LINK_CONFIG_IDX(sel_phy_idx);
4862}
4863
Merav Sicron55c11942012-11-07 00:45:48 +00004864#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004865int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4866{
4867 struct bnx2x *bp = netdev_priv(dev);
4868 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4869
4870 switch (type) {
4871 case NETDEV_FCOE_WWNN:
4872 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4873 cp->fcoe_wwn_node_name_lo);
4874 break;
4875 case NETDEV_FCOE_WWPN:
4876 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4877 cp->fcoe_wwn_port_name_lo);
4878 break;
4879 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004880 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004881 return -EINVAL;
4882 }
4883
4884 return 0;
4885}
4886#endif
4887
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004888/* called with rtnl_lock */
4889int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4890{
4891 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004892
Yuval Mintz0650c0b2015-05-04 12:34:12 +03004893 if (pci_num_vf(bp->pdev)) {
4894 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4895 return -EPERM;
4896 }
4897
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004898 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004899 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004900 return -EAGAIN;
4901 }
4902
4903 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004904 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4905 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004906 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004907 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004908
4909 /* This does not race with packet allocation
4910 * because the actual alloc size is
4911 * only updated as part of load
4912 */
4913 dev->mtu = new_mtu;
4914
Yuval Mintz230d00e2015-07-22 09:16:25 +03004915 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4916 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4917
Michał Mirosław66371c42011-04-12 09:38:23 +00004918 return bnx2x_reload_if_running(dev);
4919}
4920
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004921netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004922 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004923{
4924 struct bnx2x *bp = netdev_priv(dev);
4925
Yuval Mintz909d9fa2015-04-22 12:47:32 +03004926 if (pci_num_vf(bp->pdev)) {
4927 netdev_features_t changed = dev->features ^ features;
4928
4929 /* Revert the requested changes in features if they
4930 * would require internal reload of PF in bnx2x_set_features().
4931 */
4932 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4933 features &= ~NETIF_F_RXCSUM;
4934 features |= dev->features & NETIF_F_RXCSUM;
4935 }
4936
4937 if (changed & NETIF_F_LOOPBACK) {
4938 features &= ~NETIF_F_LOOPBACK;
4939 features |= dev->features & NETIF_F_LOOPBACK;
4940 }
4941 }
4942
Michał Mirosław66371c42011-04-12 09:38:23 +00004943 /* TPA requires Rx CSUM offloading */
Dmitry Kravkovaebf6242014-08-25 17:48:32 +03004944 if (!(features & NETIF_F_RXCSUM)) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004945 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004946 features &= ~NETIF_F_GRO;
4947 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004948
4949 return features;
4950}
4951
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non-SRIOV PFs should be able to change the loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc ? rc : 1;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

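/* Transmit watchdog callback.  With BNX2X_STOP_ON_ERROR the driver panics
 * immediately for debugging; otherwise recovery is deferred to the sp_rtnl
 * task scheduled below so the netif can be shut down gracefully first.
 */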
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

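/* PCI PM suspend hook: save PCI state and, if the interface is running,
 * detach it, unload the NIC and drop to the power state chosen by the core.
 */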
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

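/* PCI PM resume hook: restore PCI state, return the device to D0 and
 * reload the NIC if the interface was running.  Fails with -EAGAIN while
 * parity error recovery is still in progress.
 */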
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

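/* Stamp the ustorm and xstorm sections of an Ethernet connection context
 * with the CDU validation value derived from the HW CID and the Ethernet
 * connection type.
 */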
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

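/* Write the host-coalescing timeout value (in ticks) for one status block
 * index into CSTORM internal memory.
 */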
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

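/* Read-modify-write the flags byte of one status block index in CSTORM
 * internal memory, setting or clearing HC_INDEX_DATA_HC_ENABLED.
 */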
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

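/* Update interrupt coalescing for a single status block index: convert the
 * requested interval from microseconds to BNX2X_BTR ticks, and disable host
 * coalescing on that index when asked to or when the interval is zero.
 */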
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

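/* Set @flag in bp->sp_rtnl_state, with the barriers the atomic set_bit()
 * needs, and kick the slowpath rtnl task to handle it in process context.
 * For example, the TX timeout handler above defers its reset with:
 *
 *	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 */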
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);