/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (reset_devices)
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}
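
/* Illustration (hypothetical values): with bnx2x_num_queues left at 0,
 * nq starts from the kernel's default RSS queue count and is then clamped
 * to [1, BNX2X_MAX_QUEUES(bp)]; e.g. a default of 8 on a device limited to
 * 4 queues ends up with nq = 4, while a kdump kernel (reset_devices set)
 * always gets a single queue.
 */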

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the content of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */
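	/* For illustration (hypothetical sizes): with 4 ETH queues originally,
	 * max_cos = 3 and the FCoE fastpath moving from index 4 to index 2
	 * (delta = 2), the FCoE txdata index drops from
	 * 4 * 3 + FCOE_TXQ_IDX_OFFSET to 2 * 3 + FCOE_TXQ_IDX_OFFSET,
	 * i.e. by max_cos * delta.
	 */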

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
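
/* Example (hypothetical counts): if 8 ETH queues were requested but only 6
 * could be allocated (delta = 2), then for cos = 1 the txdata of fp[0] moves
 * from bnx2x_txq[1 * 8 + 0] to bnx2x_txq[1 * 6 + 0]; each per-cos row of
 * bnx2x_txq is compacted so no fastpath points into the stale layout.
 */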

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}
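	/* Sizing sketch (hypothetical numbers, assuming SGE_PAGES is 4096):
	 * with gro_size (the aggregation MSS) of 1448, full_page becomes
	 * 4096 / 1448 * 1448 = 2896, the largest multiple of the MSS that
	 * fits in one SGE buffer.
	 */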

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
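/* (i.e. 1 + 1 + 1 + 1 + 4 + 4 bytes: two NOPs, option kind, option length,
 * and the two 4-byte timestamp fields)
 */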
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * The MSS for this aggregation is approximated using its first packet.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
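
/* Worked example (hypothetical sizes): for an IPv4 aggregation without a TCP
 * timestamp, hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) = 54, so a
 * first packet with len_on_bd = 1514 yields gso_size = 1514 - 54 = 1460.
 */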

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}
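
		/* Illustration (hypothetical sizes): with gro_size = 1448 and
		 * full_page = 2896, a 2896-byte chunk is exposed to the stack
		 * as two 1448-byte page fragments sharing old_rx_pg.page; every
		 * additional fragment on the same page takes an extra page
		 * reference via get_page().
		 */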
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000648
649 skb->data_len += frag_len;
Yuval Mintz924d75a2013-01-23 03:21:44 +0000650 skb->truesize += SGE_PAGES;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000651 skb->len += frag_len;
652
653 frag_size -= frag_len;
654 }
655
656 return 0;
657}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000862
stephen hemmingera8f47eb2014-01-09 22:20:11 -0800863static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000864{
865 struct bnx2x *bp = fp->bp;
866 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300867 u16 sw_comp_cons, sw_comp_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000868 int rx_pkt = 0;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300869 union eth_rx_cqe *cqe;
870 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000871
872#ifdef BNX2X_STOP_ON_ERROR
873 if (unlikely(bp->panic))
874 return 0;
875#endif
Eric W. Biedermanb3529742014-03-14 17:57:59 -0700876 if (budget <= 0)
877 return rx_pkt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000878
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000879 bd_cons = fp->rx_bd_cons;
880 bd_prod = fp->rx_bd_prod;
881 bd_prod_fw = bd_prod;
882 sw_comp_cons = fp->rx_comp_cons;
883 sw_comp_prod = fp->rx_comp_prod;
884
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300885 comp_ring_cons = RCQ_BD(sw_comp_cons);
886 cqe = &fp->rx_comp_ring[comp_ring_cons];
887 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000888
889 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300890 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000891
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300892 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000893 struct sw_rx_bd *rx_buf = NULL;
894 struct sk_buff *skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000895 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300896 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000897 u16 len, pad, queue;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000898 u8 *data;
Tom Herbertbd5cef02013-12-17 23:23:11 -0800899 u32 rxhash;
Tom Herbert5495ab72013-12-19 08:59:08 -0800900 enum pkt_hash_types rxhash_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000901
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300902#ifdef BNX2X_STOP_ON_ERROR
903 if (unlikely(bp->panic))
904 return 0;
905#endif
906
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000907 bd_prod = RX_BD(bd_prod);
908 bd_cons = RX_BD(bd_cons);
909
wenxiong@linux.vnet.ibm.com9aaae042014-06-03 14:14:46 -0500910 /* A rmb() is required to ensure that the CQE is not read
911 * before it is written by the adapter DMA. PCI ordering
912 * rules will make sure the other fields are written before
913 * the marker at the end of struct eth_fast_path_rx_cqe
914 * but without rmb() a weakly ordered processor can process
915 * stale data. Without the barrier TPA state-machine might
916 * enter inconsistent state and kernel stack might be
917 * provided with incorrect packet description - these lead
918 * to various kernel crashed.
919 */
920 rmb();
921
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300922 cqe_fp_flags = cqe_fp->type_error_flags;
923 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000924
Merav Sicron51c1a582012-03-18 10:33:38 +0000925 DP(NETIF_MSG_RX_STATUS,
926 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
927 CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300928 cqe_fp_flags, cqe_fp->status_flags,
929 le32_to_cpu(cqe_fp->rss_hash_result),
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000930 le16_to_cpu(cqe_fp->vlan_tag),
931 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000932
933 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300934 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000935 bnx2x_sp_event(fp, cqe);
936 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000937 }
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000938
Eric Dumazete52fcb22011-11-14 06:05:34 +0000939 rx_buf = &fp->rx_buf_ring[bd_cons];
940 data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000941
Eric Dumazete52fcb22011-11-14 06:05:34 +0000942 if (!CQE_TYPE_FAST(cqe_fp_type)) {
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000943 struct bnx2x_agg_info *tpa_info;
944 u16 frag_size, pages;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300945#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000946 /* sanity check */
947 if (fp->disable_tpa &&
948 (CQE_TYPE_START(cqe_fp_type) ||
949 CQE_TYPE_STOP(cqe_fp_type)))
Merav Sicron51c1a582012-03-18 10:33:38 +0000950 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
Eric Dumazete52fcb22011-11-14 06:05:34 +0000951 CQE_TYPE(cqe_fp_type));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300952#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000953
Eric Dumazete52fcb22011-11-14 06:05:34 +0000954 if (CQE_TYPE_START(cqe_fp_type)) {
955 u16 queue = cqe_fp->queue_index;
956 DP(NETIF_MSG_RX_STATUS,
957 "calling tpa_start on queue %d\n",
958 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000959
Eric Dumazete52fcb22011-11-14 06:05:34 +0000960 bnx2x_tpa_start(fp, queue,
961 bd_cons, bd_prod,
962 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000963
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000964 goto next_rx;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000965 }
966 queue = cqe->end_agg_cqe.queue_index;
967 tpa_info = &fp->tpa_info[queue];
968 DP(NETIF_MSG_RX_STATUS,
969 "calling tpa_stop on queue %d\n",
970 queue);
971
972 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
973 tpa_info->len_on_bd;
974
975 if (fp->mode == TPA_MODE_GRO)
976 pages = (frag_size + tpa_info->full_page - 1) /
977 tpa_info->full_page;
978 else
979 pages = SGE_PAGE_ALIGN(frag_size) >>
980 SGE_PAGE_SHIFT;
981
982 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
983 &cqe->end_agg_cqe, comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000984#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000985 if (bp->panic)
986 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000987#endif
988
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000989 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
990 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000991 }
992 /* non TPA */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000993 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000994 pad = cqe_fp->placement_offset;
995 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000996 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +0000997 pad + RX_COPY_THRESH,
998 DMA_FROM_DEVICE);
999 pad += NET_SKB_PAD;
1000 prefetch(data + pad); /* speedup eth_type_trans() */
1001 /* is this an error packet? */
1002 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001003 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001004 "ERROR flags %x rx packet %u\n",
1005 cqe_fp_flags, sw_comp_cons);
Barak Witkowski15192a82012-06-19 07:48:28 +00001006 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001007 goto reuse_rx;
1008 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001009
Eric Dumazete52fcb22011-11-14 06:05:34 +00001010 /* Since we don't have a jumbo ring
1011 * copy small packets if mtu > 1500
1012 */
1013 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1014 (len <= RX_COPY_THRESH)) {
1015 skb = netdev_alloc_skb_ip_align(bp->dev, len);
1016 if (skb == NULL) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001017 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001018 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +00001019 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001020 goto reuse_rx;
1021 }
Eric Dumazete52fcb22011-11-14 06:05:34 +00001022 memcpy(skb->data, data + pad, len);
1023 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1024 } else {
Michal Schmidt996dedb2013-09-05 22:13:09 +02001025 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1026 GFP_ATOMIC) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001027 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +00001028 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001029 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001030 DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001031 skb = build_skb(data, fp->rx_frag_size);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001032 if (unlikely(!skb)) {
Eric Dumazetd46d1322012-12-10 12:16:06 +00001033 bnx2x_frag_free(fp, data);
Barak Witkowski15192a82012-06-19 07:48:28 +00001034 bnx2x_fp_qstats(bp, fp)->
1035 rx_skb_alloc_failed++;
Eric Dumazete52fcb22011-11-14 06:05:34 +00001036 goto next_rx;
1037 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001038 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001039 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +00001040 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1041 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +00001042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001043reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +00001044 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001045 goto next_rx;
1046 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001047 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001048
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001049 skb_put(skb, len);
1050 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001051
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001052 /* Set Toeplitz hash for a none-LRO skb */
Tom Herbert5495ab72013-12-19 08:59:08 -08001053 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1054 skb_set_hash(skb, rxhash, rxhash_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001055
Dmitry Kravkov036d2df2011-12-12 23:40:53 +00001056 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001057
Eric Dumazetd6cb3e42012-06-12 23:50:04 +00001058 if (bp->dev->features & NETIF_F_RXCSUM)
Barak Witkowski15192a82012-06-19 07:48:28 +00001059 bnx2x_csum_validate(skb, cqe, fp,
1060 bnx2x_fp_qstats(bp, fp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001061
Dmitry Kravkovf233caf2011-11-13 04:34:22 +00001062 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001063
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001064 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1065 PARSING_FLAGS_VLAN)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001066 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001067 le16_to_cpu(cqe_fp->vlan_tag));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001068
Eliezer Tamir8b80cda2013-07-10 17:13:26 +03001069 skb_mark_napi_id(skb, &fp->napi);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001070
1071 if (bnx2x_fp_ll_polling(fp))
1072 netif_receive_skb(skb);
1073 else
1074 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001075next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +00001076 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001077
1078 bd_cons = NEXT_RX_IDX(bd_cons);
1079 bd_prod = NEXT_RX_IDX(bd_prod);
1080 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1081 rx_pkt++;
1082next_cqe:
1083 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1084 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1085
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001086 /* mark CQE as free */
1087 BNX2X_SEED_CQE(cqe_fp);
1088
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001089 if (rx_pkt == budget)
1090 break;
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001091
1092 comp_ring_cons = RCQ_BD(sw_comp_cons);
1093 cqe = &fp->rx_comp_ring[comp_ring_cons];
1094 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001095 } /* while */
1096
1097 fp->rx_bd_cons = bd_cons;
1098 fp->rx_bd_prod = bd_prod_fw;
1099 fp->rx_comp_cons = sw_comp_cons;
1100 fp->rx_comp_prod = sw_comp_prod;
1101
1102 /* Update producers */
1103 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1104 fp->rx_sge_prod);
1105
1106 fp->rx_pkt += rx_pkt;
1107 fp->rx_calls++;
1108
1109 return rx_pkt;
1110}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
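
/* Worked example (hypothetical configuration values): in SI mode maxCfg acts
 * as a percentage, so maxCfg = 50 on a 10000 Mbps link reports 5000 Mbps; in
 * SD mode maxCfg is in 100 Mbps units, so maxCfg = 25 caps the reported speed
 * at min(line_speed, 2500) Mbps.
 */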
1177
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001178/**
1179 * bnx2x_fill_report_data - fill link report data to report
1180 *
1181 * @bp: driver handle
1182 * @data: link state to update
1183 *
1184 * It uses a none-atomic bit operations because is called under the mutex.
1185 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001186static void bnx2x_fill_report_data(struct bnx2x *bp,
1187 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001188{
1189 u16 line_speed = bnx2x_get_mf_speed(bp);
1190
1191 memset(data, 0, sizeof(*data));
1192
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001193 /* Fill the report data: effective line speed */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001194 data->line_speed = line_speed;
1195
1196 /* Link is down */
1197 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1198 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1199 &data->link_report_flags);
1200
1201 /* Full DUPLEX */
1202 if (bp->link_vars.duplex == DUPLEX_FULL)
1203 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1204
1205 /* Rx Flow Control is ON */
1206 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1207 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1208
1209 /* Tx Flow Control is ON */
1210 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1211 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1212}
1213
1214/**
1215 * bnx2x_link_report - report link status to OS.
1216 *
1217 * @bp: driver handle
1218 *
 1219 * Calls __bnx2x_link_report() under the same locking scheme
 1220 * as the link/PHY state managing code to ensure consistent link
 1221 * reporting.
1222 */
1223
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001224void bnx2x_link_report(struct bnx2x *bp)
1225{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001226 bnx2x_acquire_phy_lock(bp);
1227 __bnx2x_link_report(bp);
1228 bnx2x_release_phy_lock(bp);
1229}
1230
1231/**
1232 * __bnx2x_link_report - report link status to OS.
1233 *
1234 * @bp: driver handle
1235 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001236 * Non-atomic implementation.
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001237 * Should be called under the phy_lock.
1238 */
1239void __bnx2x_link_report(struct bnx2x *bp)
1240{
1241 struct bnx2x_link_report_data cur_data;
1242
1243 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001244 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001245 bnx2x_read_mf_cfg(bp);
1246
1247 /* Read the current link report info */
1248 bnx2x_fill_report_data(bp, &cur_data);
1249
1250 /* Don't report link down or exactly the same link status twice */
1251 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1252 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1253 &bp->last_reported_link.link_report_flags) &&
1254 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1255 &cur_data.link_report_flags)))
1256 return;
1257
1258 bp->link_cnt++;
1259
 1260 /* We are going to report new link parameters now -
1261 * remember the current data for the next time.
1262 */
1263 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1264
1265 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1266 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001267 netif_carrier_off(bp->dev);
1268 netdev_err(bp->dev, "NIC Link is Down\n");
1269 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001270 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001271 const char *duplex;
1272 const char *flow;
1273
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001274 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001275
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001276 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1277 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001278 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001279 else
Joe Perches94f05b02011-08-14 12:16:20 +00001280 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001281
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001282 /* Handle the FC at the end so that only these flags could
 1283 * possibly be set. This way we can easily check whether FC is
 1284 * enabled.
1285 */
1286 if (cur_data.link_report_flags) {
1287 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1288 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001289 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1290 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001291 flow = "ON - receive & transmit";
1292 else
1293 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001294 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001295 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001296 }
Joe Perches94f05b02011-08-14 12:16:20 +00001297 } else {
1298 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001299 }
Joe Perches94f05b02011-08-14 12:16:20 +00001300 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1301 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001302 }
1303}
1304
Eric Dumazet1191cb82012-04-27 21:39:21 +00001305static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1306{
1307 int i;
1308
1309 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1310 struct eth_rx_sge *sge;
1311
1312 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1313 sge->addr_hi =
1314 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1315 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1316
1317 sge->addr_lo =
1318 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1319 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1320 }
1321}
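/*
 * Note on the loop above: iteration i fills the "next page" SGE element at
 * the tail of page (i - 1) with the bus address of page (i % NUM_RX_SGE_PAGES),
 * so consecutive pages are chained and the last page wraps back to page 0,
 * forming a circular SGE ring.
 */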
1322
1323static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1324 struct bnx2x_fastpath *fp, int last)
1325{
1326 int i;
1327
1328 for (i = 0; i < last; i++) {
1329 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1330 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1331 u8 *data = first_buf->data;
1332
1333 if (data == NULL) {
1334 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1335 continue;
1336 }
1337 if (tpa_info->tpa_state == BNX2X_TPA_START)
1338 dma_unmap_single(&bp->pdev->dev,
1339 dma_unmap_addr(first_buf, mapping),
1340 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001341 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001342 first_buf->data = NULL;
1343 }
1344}
1345
Merav Sicron55c11942012-11-07 00:45:48 +00001346void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1347{
1348 int j;
1349
1350 for_each_rx_queue_cnic(bp, j) {
1351 struct bnx2x_fastpath *fp = &bp->fp[j];
1352
1353 fp->rx_bd_cons = 0;
1354
1355 /* Activate BD ring */
1356 /* Warning!
 1357 * this will generate an interrupt (to the TSTORM);
 1358 * it must only be done after the chip is initialized
1359 */
1360 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1361 fp->rx_sge_prod);
1362 }
1363}
1364
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001365void bnx2x_init_rx_rings(struct bnx2x *bp)
1366{
1367 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001368 u16 ring_prod;
1369 int i, j;
1370
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001371 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001372 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001373 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001374
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001375 DP(NETIF_MSG_IFUP,
1376 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1377
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001378 if (!fp->disable_tpa) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001379 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001380 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001381 struct bnx2x_agg_info *tpa_info =
1382 &fp->tpa_info[i];
1383 struct sw_rx_bd *first_buf =
1384 &tpa_info->first_buf;
1385
Michal Schmidt996dedb2013-09-05 22:13:09 +02001386 first_buf->data =
1387 bnx2x_frag_alloc(fp, GFP_KERNEL);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001388 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001389 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1390 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001391 bnx2x_free_tpa_pool(bp, fp, i);
1392 fp->disable_tpa = 1;
1393 break;
1394 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001395 dma_unmap_addr_set(first_buf, mapping, 0);
1396 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001397 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001398
1399 /* "next page" elements initialization */
1400 bnx2x_set_next_page_sgl(fp);
1401
1402 /* set SGEs bit mask */
1403 bnx2x_init_sge_ring_bit_mask(fp);
1404
1405 /* Allocate SGEs and initialize the ring elements */
1406 for (i = 0, ring_prod = 0;
1407 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1408
Michal Schmidt996dedb2013-09-05 22:13:09 +02001409 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1410 GFP_KERNEL) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001411 BNX2X_ERR("was only able to allocate %d rx sges\n",
1412 i);
1413 BNX2X_ERR("disabling TPA for queue[%d]\n",
1414 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001415 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001416 bnx2x_free_rx_sge_range(bp, fp,
1417 ring_prod);
1418 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001419 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001420 fp->disable_tpa = 1;
1421 ring_prod = 0;
1422 break;
1423 }
1424 ring_prod = NEXT_SGE_IDX(ring_prod);
1425 }
1426
1427 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001428 }
1429 }
1430
Merav Sicron55c11942012-11-07 00:45:48 +00001431 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001432 struct bnx2x_fastpath *fp = &bp->fp[j];
1433
1434 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001435
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001436 /* Activate BD ring */
1437 /* Warning!
 1438 * this will generate an interrupt (to the TSTORM);
 1439 * it must only be done after the chip is initialized
1440 */
1441 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1442 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001443
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001444 if (j != 0)
1445 continue;
1446
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001447 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001448 REG_WR(bp, BAR_USTRORM_INTMEM +
1449 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1450 U64_LO(fp->rx_comp_mapping));
1451 REG_WR(bp, BAR_USTRORM_INTMEM +
1452 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1453 U64_HI(fp->rx_comp_mapping));
1454 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001455 }
1456}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001457
Merav Sicron55c11942012-11-07 00:45:48 +00001458static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1459{
1460 u8 cos;
1461 struct bnx2x *bp = fp->bp;
1462
1463 for_each_cos_in_tx_queue(fp, cos) {
1464 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1465 unsigned pkts_compl = 0, bytes_compl = 0;
1466
1467 u16 sw_prod = txdata->tx_pkt_prod;
1468 u16 sw_cons = txdata->tx_pkt_cons;
1469
1470 while (sw_cons != sw_prod) {
1471 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1472 &pkts_compl, &bytes_compl);
1473 sw_cons++;
1474 }
1475
1476 netdev_tx_reset_queue(
1477 netdev_get_tx_queue(bp->dev,
1478 txdata->txq_index));
1479 }
1480}
1481
1482static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1483{
1484 int i;
1485
1486 for_each_tx_queue_cnic(bp, i) {
1487 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1488 }
1489}
1490
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001491static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1492{
1493 int i;
1494
Merav Sicron55c11942012-11-07 00:45:48 +00001495 for_each_eth_queue(bp, i) {
1496 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001497 }
1498}
1499
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001500static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1501{
1502 struct bnx2x *bp = fp->bp;
1503 int i;
1504
1505 /* ring wasn't allocated */
1506 if (fp->rx_buf_ring == NULL)
1507 return;
1508
1509 for (i = 0; i < NUM_RX_BD; i++) {
1510 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001511 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001512
Eric Dumazete52fcb22011-11-14 06:05:34 +00001513 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001514 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001515 dma_unmap_single(&bp->pdev->dev,
1516 dma_unmap_addr(rx_buf, mapping),
1517 fp->rx_buf_size, DMA_FROM_DEVICE);
1518
Eric Dumazete52fcb22011-11-14 06:05:34 +00001519 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001520 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001521 }
1522}
1523
Merav Sicron55c11942012-11-07 00:45:48 +00001524static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1525{
1526 int j;
1527
1528 for_each_rx_queue_cnic(bp, j) {
1529 bnx2x_free_rx_bds(&bp->fp[j]);
1530 }
1531}
1532
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001533static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1534{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001535 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001536
Merav Sicron55c11942012-11-07 00:45:48 +00001537 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001538 struct bnx2x_fastpath *fp = &bp->fp[j];
1539
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001540 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001541
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001542 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001543 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001544 }
1545}
1546
stephen hemmingera8f47eb2014-01-09 22:20:11 -08001547static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00001548{
1549 bnx2x_free_tx_skbs_cnic(bp);
1550 bnx2x_free_rx_skbs_cnic(bp);
1551}
1552
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001553void bnx2x_free_skbs(struct bnx2x *bp)
1554{
1555 bnx2x_free_tx_skbs(bp);
1556 bnx2x_free_rx_skbs(bp);
1557}
1558
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001559void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1560{
1561 /* load old values */
1562 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1563
1564 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1565 /* leave all but MAX value */
1566 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1567
1568 /* set new MAX value */
1569 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1570 & FUNC_MF_CFG_MAX_BW_MASK;
1571
1572 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1573 }
1574}
1575
Dmitry Kravkovca924292011-06-14 01:33:08 +00001576/**
1577 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1578 *
1579 * @bp: driver handle
1580 * @nvecs: number of vectors to be released
1581 */
1582static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001583{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001584 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001585
Dmitry Kravkovca924292011-06-14 01:33:08 +00001586 if (nvecs == offset)
1587 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001588
1589 /* VFs don't have a default SB */
1590 if (IS_PF(bp)) {
1591 free_irq(bp->msix_table[offset].vector, bp->dev);
1592 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1593 bp->msix_table[offset].vector);
1594 offset++;
1595 }
Merav Sicron55c11942012-11-07 00:45:48 +00001596
1597 if (CNIC_SUPPORT(bp)) {
1598 if (nvecs == offset)
1599 return;
1600 offset++;
1601 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001602
Dmitry Kravkovca924292011-06-14 01:33:08 +00001603 for_each_eth_queue(bp, i) {
1604 if (nvecs == offset)
1605 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001606 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1607 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001608
1609 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001610 }
1611}
1612
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001613void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001614{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001615 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001616 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1617 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1618
1619 /* vfs don't have a default status block */
1620 if (IS_PF(bp))
1621 nvecs++;
1622
1623 bnx2x_free_msix_irqs(bp, nvecs);
1624 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001625 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001626 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001627}
1628
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001629int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001630{
Ariel Elior1ab44342013-01-01 05:22:23 +00001631 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001632
Ariel Elior1ab44342013-01-01 05:22:23 +00001633 /* VFs don't have a default status block */
1634 if (IS_PF(bp)) {
1635 bp->msix_table[msix_vec].entry = msix_vec;
1636 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1637 bp->msix_table[0].entry);
1638 msix_vec++;
1639 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001640
Merav Sicron55c11942012-11-07 00:45:48 +00001641 /* Cnic requires an msix vector for itself */
1642 if (CNIC_SUPPORT(bp)) {
1643 bp->msix_table[msix_vec].entry = msix_vec;
1644 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1645 msix_vec, bp->msix_table[msix_vec].entry);
1646 msix_vec++;
1647 }
1648
Ariel Elior6383c0b2011-07-14 08:31:57 +00001649 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001650 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001651 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001652 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1653 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001654 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001655 }
1656
Ariel Elior1ab44342013-01-01 05:22:23 +00001657 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1658 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001659
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001660 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1661 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001662 /*
1663 * reconfigure number of tx/rx queues according to available
1664 * MSI-X vectors
1665 */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001666 if (rc == -ENOSPC) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001667 /* Get by with single vector */
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001668 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1669 if (rc < 0) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001670 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1671 rc);
1672 goto no_msix;
1673 }
1674
1675 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1676 bp->flags |= USING_SINGLE_MSIX_FLAG;
1677
Merav Sicron55c11942012-11-07 00:45:48 +00001678 BNX2X_DEV_INFO("set number of queues to 1\n");
1679 bp->num_ethernet_queues = 1;
1680 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001681 } else if (rc < 0) {
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001682 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001683 goto no_msix;
Alexander Gordeeva5444b12014-02-18 11:07:54 +01001684 } else if (rc < msix_vec) {
 1685 /* how many fewer vectors will we have? */
1686 int diff = msix_vec - rc;
1687
1688 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1689
1690 /*
1691 * decrease number of queues by number of unallocated entries
1692 */
1693 bp->num_ethernet_queues -= diff;
1694 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1695
1696 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1697 bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001698 }
1699
1700 bp->flags |= USING_MSIX_FLAG;
1701
1702 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001703
1704no_msix:
1705 /* fall to INTx if not enough memory */
1706 if (rc == -ENOMEM)
1707 bp->flags |= DISABLE_MSI_FLAG;
1708
1709 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001710}
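/*
 * Illustrative vector accounting for the request above (assumed: PF with
 * CNIC support and 8 ETH queues):
 *
 *	msix_vec = 1 (slowpath) + 1 (CNIC) + 8 (ETH) = 10 requested
 *
 * If pci_enable_msix_range() grants only rc = 7, diff = 10 - 7 = 3 and
 * bp->num_ethernet_queues drops from 8 to 5.  -ENOSPC triggers the
 * single-vector retry, any other failure ends up in the no_msix path.
 */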
1711
1712static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1713{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001714 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001715
Ariel Eliorad5afc82013-01-01 05:22:26 +00001716 /* no default status block for vf */
1717 if (IS_PF(bp)) {
1718 rc = request_irq(bp->msix_table[offset++].vector,
1719 bnx2x_msix_sp_int, 0,
1720 bp->dev->name, bp->dev);
1721 if (rc) {
1722 BNX2X_ERR("request sp irq failed\n");
1723 return -EBUSY;
1724 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001725 }
1726
Merav Sicron55c11942012-11-07 00:45:48 +00001727 if (CNIC_SUPPORT(bp))
1728 offset++;
1729
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001730 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001731 struct bnx2x_fastpath *fp = &bp->fp[i];
1732 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1733 bp->dev->name, i);
1734
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001735 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001736 bnx2x_msix_fp_int, 0, fp->name, fp);
1737 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001738 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1739 bp->msix_table[offset].vector, rc);
1740 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001741 return -EBUSY;
1742 }
1743
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001744 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001745 }
1746
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001747 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001748 if (IS_PF(bp)) {
1749 offset = 1 + CNIC_SUPPORT(bp);
1750 netdev_info(bp->dev,
1751 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1752 bp->msix_table[0].vector,
1753 0, bp->msix_table[offset].vector,
1754 i - 1, bp->msix_table[offset + i - 1].vector);
1755 } else {
1756 offset = CNIC_SUPPORT(bp);
1757 netdev_info(bp->dev,
1758 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1759 0, bp->msix_table[offset].vector,
1760 i - 1, bp->msix_table[offset + i - 1].vector);
1761 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001762 return 0;
1763}
1764
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001765int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001766{
1767 int rc;
1768
1769 rc = pci_enable_msi(bp->pdev);
1770 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001771 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001772 return -1;
1773 }
1774 bp->flags |= USING_MSI_FLAG;
1775
1776 return 0;
1777}
1778
1779static int bnx2x_req_irq(struct bnx2x *bp)
1780{
1781 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001782 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001783
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001784 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001785 flags = 0;
1786 else
1787 flags = IRQF_SHARED;
1788
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001789 if (bp->flags & USING_MSIX_FLAG)
1790 irq = bp->msix_table[0].vector;
1791 else
1792 irq = bp->pdev->irq;
1793
1794 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001795}
1796
Yuval Mintzc957d092013-06-25 08:50:11 +03001797static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001798{
1799 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001800 if (bp->flags & USING_MSIX_FLAG &&
1801 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001802 rc = bnx2x_req_msix_irqs(bp);
1803 if (rc)
1804 return rc;
1805 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001806 rc = bnx2x_req_irq(bp);
1807 if (rc) {
1808 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1809 return rc;
1810 }
1811 if (bp->flags & USING_MSI_FLAG) {
1812 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001813 netdev_info(bp->dev, "using MSI IRQ %d\n",
1814 bp->dev->irq);
1815 }
1816 if (bp->flags & USING_MSIX_FLAG) {
1817 bp->dev->irq = bp->msix_table[0].vector;
1818 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1819 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001820 }
1821 }
1822
1823 return 0;
1824}
1825
Merav Sicron55c11942012-11-07 00:45:48 +00001826static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1827{
1828 int i;
1829
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001830 for_each_rx_queue_cnic(bp, i) {
1831 bnx2x_fp_init_lock(&bp->fp[i]);
Merav Sicron55c11942012-11-07 00:45:48 +00001832 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001833 }
Merav Sicron55c11942012-11-07 00:45:48 +00001834}
1835
Eric Dumazet1191cb82012-04-27 21:39:21 +00001836static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001837{
1838 int i;
1839
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001840 for_each_eth_queue(bp, i) {
1841 bnx2x_fp_init_lock(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001842 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001843 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001844}
1845
Merav Sicron55c11942012-11-07 00:45:48 +00001846static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1847{
1848 int i;
1849
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001850 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001851 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001852 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1853 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001854 }
Merav Sicron55c11942012-11-07 00:45:48 +00001855}
1856
Eric Dumazet1191cb82012-04-27 21:39:21 +00001857static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001858{
1859 int i;
1860
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001861 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001862 napi_disable(&bnx2x_fp(bp, i, napi));
Yuval Mintz9a2620c2014-01-07 12:07:41 +02001863 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1864 usleep_range(1000, 2000);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001865 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001866}
1867
1868void bnx2x_netif_start(struct bnx2x *bp)
1869{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001870 if (netif_running(bp->dev)) {
1871 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001872 if (CNIC_LOADED(bp))
1873 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001874 bnx2x_int_enable(bp);
1875 if (bp->state == BNX2X_STATE_OPEN)
1876 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001877 }
1878}
1879
1880void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1881{
1882 bnx2x_int_disable_sync(bp, disable_hw);
1883 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001884 if (CNIC_LOADED(bp))
1885 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001886}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001887
Jason Wangf663dd92014-01-10 16:18:26 +08001888u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001889 void *accel_priv, select_queue_fallback_t fallback)
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001890{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001891 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001892
Merav Sicron55c11942012-11-07 00:45:48 +00001893 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001894 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1895 u16 ether_type = ntohs(hdr->h_proto);
1896
1897 /* Skip VLAN tag if present */
1898 if (ether_type == ETH_P_8021Q) {
1899 struct vlan_ethhdr *vhdr =
1900 (struct vlan_ethhdr *)skb->data;
1901
1902 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1903 }
1904
1905 /* If ethertype is FCoE or FIP - use FCoE ring */
1906 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001907 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001908 }
Merav Sicron55c11942012-11-07 00:45:48 +00001909
David S. Miller823dcd22011-08-20 10:39:12 -07001910 /* select a non-FCoE queue */
Daniel Borkmann99932d42014-02-16 15:55:20 +01001911 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001912}
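/*
 * Summary of the selection above: FCoE/FIP ethertypes (looked up past an
 * optional 802.1Q tag) are steered to the dedicated FCoE Tx ring; all other
 * traffic uses the stack's fallback hash taken modulo the number of ETH
 * queues, so regular L2 frames never land on the FCoE ring.
 */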
1913
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001914void bnx2x_set_num_queues(struct bnx2x *bp)
1915{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001916 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001917 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001918
Barak Witkowskia3348722012-04-23 03:04:46 +00001919 /* override in STORAGE SD modes */
1920 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001921 bp->num_ethernet_queues = 1;
1922
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001923 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001924 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1925 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001926
1927 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001928}
1929
David S. Miller823dcd22011-08-20 10:39:12 -07001930/**
1931 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1932 *
1933 * @bp: Driver handle
1934 *
 1935 * We currently support at most 16 Tx queues for each CoS, thus we will
1936 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1937 * bp->max_cos.
1938 *
1939 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1940 * index after all ETH L2 indices.
1941 *
1942 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1943 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001944 * 16..31,...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001945 *
1946 * The proper configuration of skb->queue_mapping is handled by
1947 * bnx2x_select_queue() and __skb_tx_hash().
1948 *
1949 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1950 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1951 */
Merav Sicron55c11942012-11-07 00:45:48 +00001952static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001953{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001954 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001955
Merav Sicron65565882012-06-19 07:48:26 +00001956 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001957 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001958
1959/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001960 if (include_cnic && !NO_FCOE(bp)) {
1961 rx++;
1962 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001963 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001964
1965 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1966 if (rc) {
1967 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1968 return rc;
1969 }
1970 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1971 if (rc) {
1972 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1973 return rc;
1974 }
1975
Merav Sicron51c1a582012-03-18 10:33:38 +00001976 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001977 tx, rx);
1978
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001979 return rc;
1980}
1981
Eric Dumazet1191cb82012-04-27 21:39:21 +00001982static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001983{
1984 int i;
1985
1986 for_each_queue(bp, i) {
1987 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001988 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001989
1990 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1991 if (IS_FCOE_IDX(i))
1992 /*
1993 * Although there are no IP frames expected to arrive to
1994 * this ring we still want to add an
1995 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1996 * overrun attack.
1997 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001998 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001999 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00002000 mtu = bp->dev->mtu;
2001 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2002 IP_HEADER_ALIGNMENT_PADDING +
2003 ETH_OVREHEAD +
2004 mtu +
2005 BNX2X_FW_RX_ALIGN_END;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002006 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00002007 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2008 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2009 else
2010 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002011 }
2012}
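/*
 * Sizing note for the loop above: the Rx buffer must cover the FW start/end
 * alignments, the IP header alignment padding, the L2 overhead and the MTU.
 * The page-fragment allocator is used only when that size plus NET_SKB_PAD
 * still fits in one PAGE_SIZE page; for larger (jumbo) MTUs rx_frag_size is
 * left at 0 and bnx2x_frag_alloc() falls back to a plain allocation.
 */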
2013
Ariel Elior60cad4e2013-09-04 14:09:22 +03002014static int bnx2x_init_rss(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002015{
2016 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002017 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2018
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002019 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002020 * enabled
2021 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002022 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2023 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00002024 bp->fp->cl_id +
2025 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002026
2027 /*
2028 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2029 * per-port, so if explicit configuration is needed, do it only
2030 * for a PMF.
2031 *
2032 * For 57712 and newer on the other hand it's a per-function
2033 * configuration.
2034 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002035 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002036}
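/*
 * Example of the indirection table fill above, assuming 4 ETH queues:
 * ethtool_rxfh_indir_default(i, 4) is simply i % 4, so the table becomes
 *
 *	ind_table[] = { cl_id + 0, cl_id + 1, cl_id + 2, cl_id + 3,
 *			cl_id + 0, cl_id + 1, ... }
 *
 * i.e. hash buckets cycle over consecutive client IDs starting at the
 * first ETH queue's cl_id.
 */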
2037
Ariel Elior60cad4e2013-09-04 14:09:22 +03002038int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2039 bool config_hash, bool enable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002040{
Yuval Mintz3b603062012-03-18 10:33:39 +00002041 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002042
 2043 /* Although RSS is meaningless when there is a single HW queue, we
 2044 * still need it enabled in order to have the HW Rx hash generated.
2045 *
2046 * if (!is_eth_multi(bp))
2047 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2048 */
2049
Dmitry Kravkov96305232012-04-03 18:41:30 +00002050 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002051
2052 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2053
Ariel Elior60cad4e2013-09-04 14:09:22 +03002054 if (enable) {
2055 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002056
Ariel Elior60cad4e2013-09-04 14:09:22 +03002057 /* RSS configuration */
2058 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2059 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2060 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2061 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2062 if (rss_obj->udp_rss_v4)
2063 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2064 if (rss_obj->udp_rss_v6)
2065 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2066 } else {
2067 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2068 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002069
Dmitry Kravkov96305232012-04-03 18:41:30 +00002070 /* Hash bits */
2071 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002072
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002073 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002074
Dmitry Kravkov96305232012-04-03 18:41:30 +00002075 if (config_hash) {
2076 /* RSS keys */
Ariel Elior60cad4e2013-09-04 14:09:22 +03002077 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
Dmitry Kravkov96305232012-04-03 18:41:30 +00002078 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002079 }
2080
Ariel Elior60cad4e2013-09-04 14:09:22 +03002081 if (IS_PF(bp))
2082 return bnx2x_config_rss(bp, &params);
2083 else
2084 return bnx2x_vfpf_config_rss(bp, &params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002085}
2086
Eric Dumazet1191cb82012-04-27 21:39:21 +00002087static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002088{
Yuval Mintz3b603062012-03-18 10:33:39 +00002089 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002090
2091 /* Prepare parameters for function state transitions */
2092 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2093
2094 func_params.f_obj = &bp->func_obj;
2095 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2096
2097 func_params.params.hw_init.load_phase = load_code;
2098
2099 return bnx2x_func_state_change(bp, &func_params);
2100}
2101
2102/*
 2103 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002104 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002105 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002106void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002107{
2108 int rc;
2109 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002110 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002111 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002112
2113 /***************** Cleanup MACs' object first *************************/
2114
2115 /* Wait for completion of requested */
2116 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2117 /* Perform a dry cleanup */
2118 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2119
2120 /* Clean ETH primary MAC */
2121 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002122 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002123 &ramrod_flags);
2124 if (rc != 0)
2125 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2126
2127 /* Cleanup UC list */
2128 vlan_mac_flags = 0;
2129 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2130 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2131 &ramrod_flags);
2132 if (rc != 0)
2133 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2134
2135 /***************** Now clean mcast object *****************************/
2136 rparam.mcast_obj = &bp->mcast_obj;
2137 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2138
Yuval Mintz8b09be52013-08-01 17:30:59 +03002139 /* Add a DEL command... - Since we're doing a driver cleanup only,
2140 * we take a lock surrounding both the initial send and the CONTs,
2141 * as we don't want a true completion to disrupt us in the middle.
2142 */
2143 netif_addr_lock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002144 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2145 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002146 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2147 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002148
2149 /* ...and wait until all pending commands are cleared */
2150 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2151 while (rc != 0) {
2152 if (rc < 0) {
2153 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2154 rc);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002155 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002156 return;
2157 }
2158
2159 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2160 }
Yuval Mintz8b09be52013-08-01 17:30:59 +03002161 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002162}
2163
2164#ifndef BNX2X_STOP_ON_ERROR
2165#define LOAD_ERROR_EXIT(bp, label) \
2166 do { \
2167 (bp)->state = BNX2X_STATE_ERROR; \
2168 goto label; \
2169 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002170
2171#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2172 do { \
2173 bp->cnic_loaded = false; \
2174 goto label; \
2175 } while (0)
2176#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002177#define LOAD_ERROR_EXIT(bp, label) \
2178 do { \
2179 (bp)->state = BNX2X_STATE_ERROR; \
2180 (bp)->panic = 1; \
2181 return -EBUSY; \
2182 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002183#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2184 do { \
2185 bp->cnic_loaded = false; \
2186 (bp)->panic = 1; \
2187 return -EBUSY; \
2188 } while (0)
2189#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002190
Ariel Eliorad5afc82013-01-01 05:22:26 +00002191static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002192{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002193 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2194 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2195 return;
2196}
Yuval Mintz452427b2012-03-26 20:47:07 +00002197
Ariel Eliorad5afc82013-01-01 05:22:26 +00002198static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2199{
Ariel Elior8db573b2013-01-01 05:22:37 +00002200 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002201 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002202
Ariel Eliorad5afc82013-01-01 05:22:26 +00002203 /* number of queues for statistics is number of eth queues + FCoE */
2204 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002205
Ariel Eliorad5afc82013-01-01 05:22:26 +00002206 /* Total number of FW statistics requests =
2207 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2208 * and fcoe l2 queue) stats + num of queues (which includes another 1
2209 * for fcoe l2 queue if applicable)
2210 */
2211 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2212
Ariel Elior8db573b2013-01-01 05:22:37 +00002213 /* vf stats appear in the request list, but their data is allocated by
2214 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2215 * it is used to determine where to place the vf stats queries in the
2216 * request struct
2217 */
2218 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002219 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002220
Ariel Eliorad5afc82013-01-01 05:22:26 +00002221 /* Request is built from stats_query_header and an array of
2222 * stats_query_cmd_group each of which contains
2223 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2224 * configured in the stats_query_header.
2225 */
2226 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002227 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2228 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002229 1 : 0));
2230
Ariel Elior8db573b2013-01-01 05:22:37 +00002231 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2232 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002233 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2234 num_groups * sizeof(struct stats_query_cmd_group);
2235
2236 /* Data for statistics requests + stats_counter
2237 * stats_counter holds per-STORM counters that are incremented
2238 * when STORM has finished with the current request.
2239 * memory for FCoE offloaded statistics are counted anyway,
2240 * even if they will not be sent.
2241 * VF stats are not accounted for here as the data of VF stats is stored
2242 * in memory allocated by the VF, not here.
2243 */
2244 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2245 sizeof(struct per_pf_stats) +
2246 sizeof(struct fcoe_statistics_params) +
2247 sizeof(struct per_queue_stats) * num_queue_stats +
2248 sizeof(struct stats_counter);
2249
Joe Perchescd2b0382014-02-20 13:25:51 -08002250 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2251 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2252 if (!bp->fw_stats)
2253 goto alloc_mem_err;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002254
2255 /* Set shortcuts */
2256 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2257 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2258 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2259 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2260 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2261 bp->fw_stats_req_sz;
2262
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002263 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002264 U64_HI(bp->fw_stats_req_mapping),
2265 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002266 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002267 U64_HI(bp->fw_stats_data_mapping),
2268 U64_LO(bp->fw_stats_data_mapping));
2269 return 0;
2270
2271alloc_mem_err:
2272 bnx2x_free_fw_stats_mem(bp);
2273 BNX2X_ERR("Can't allocate FW stats memory\n");
2274 return -ENOMEM;
2275}
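/*
 * Worked example of the accounting above (assumed: PF, 8 ETH queues, FCoE
 * present, no SR-IOV headroom):
 *
 *	num_queue_stats = 8 + 1 = 9
 *	fw_stats_num    = 2 + 1 + 9 = 12
 *	num_groups      = DIV_ROUND_UP(12, STATS_QUERY_CMD_COUNT)
 *
 * giving one stats_query_header plus num_groups command groups in the
 * request area, followed by the data area sized a few lines above.
 */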
2276
2277/* send load request to mcp and analyze response */
2278static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2279{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002280 u32 param;
2281
Ariel Eliorad5afc82013-01-01 05:22:26 +00002282 /* init fw_seq */
2283 bp->fw_seq =
2284 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2285 DRV_MSG_SEQ_NUMBER_MASK);
2286 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2287
2288 /* Get current FW pulse sequence */
2289 bp->fw_drv_pulse_wr_seq =
2290 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2291 DRV_PULSE_SEQ_MASK);
2292 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2293
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002294 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2295
2296 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2297 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2298
Ariel Eliorad5afc82013-01-01 05:22:26 +00002299 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002300 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002301
2302 /* if mcp fails to respond we must abort */
2303 if (!(*load_code)) {
2304 BNX2X_ERR("MCP response failure, aborting\n");
2305 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002306 }
2307
Ariel Eliorad5afc82013-01-01 05:22:26 +00002308 /* If mcp refused (e.g. other port is in diagnostic mode) we
2309 * must abort
2310 */
2311 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2312 BNX2X_ERR("MCP refused load request, aborting\n");
2313 return -EBUSY;
2314 }
2315 return 0;
2316}
2317
2318/* check whether another PF has already loaded FW to chip. In
2319 * virtualized environments a pf from another VM may have already
2320 * initialized the device including loading FW
2321 */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002322int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002323{
2324 /* is another pf loaded on this engine? */
2325 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2326 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2327 /* build my FW version dword */
2328 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2329 (BCM_5710_FW_MINOR_VERSION << 8) +
2330 (BCM_5710_FW_REVISION_VERSION << 16) +
2331 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2332
2333 /* read loaded FW from chip */
2334 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2335
2336 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2337 loaded_fw, my_fw);
2338
2339 /* abort nic load if version mismatch */
2340 if (my_fw != loaded_fw) {
Yuval Mintz91ebb922013-12-26 09:57:07 +02002341 if (print_err)
2342 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2343 loaded_fw, my_fw);
2344 else
2345 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2346 loaded_fw, my_fw);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002347 return -EBUSY;
2348 }
2349 }
2350 return 0;
2351}
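/*
 * Example of the version dword built above, assuming FW 7.8.19.0:
 *
 *	my_fw = 7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807
 *
 * which must match the dword the already-loaded PF left in XSEM_REG_PRAM,
 * otherwise the load is aborted.
 */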
2352
2353/* returns the "mcp load_code" according to global load_count array */
2354static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2355{
2356 int path = BP_PATH(bp);
2357
2358 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002359 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2360 bnx2x_load_count[path][2]);
2361 bnx2x_load_count[path][0]++;
2362 bnx2x_load_count[path][1 + port]++;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002363 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002364 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2365 bnx2x_load_count[path][2]);
2366 if (bnx2x_load_count[path][0] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002367 return FW_MSG_CODE_DRV_LOAD_COMMON;
stephen hemmingera8f47eb2014-01-09 22:20:11 -08002368 else if (bnx2x_load_count[path][1 + port] == 1)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002369 return FW_MSG_CODE_DRV_LOAD_PORT;
2370 else
2371 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2372}
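/*
 * The counters above emulate the MCP's answer to a LOAD_REQ:
 *
 *	first function on the path  -> FW_MSG_CODE_DRV_LOAD_COMMON
 *	first function on this port -> FW_MSG_CODE_DRV_LOAD_PORT
 *	any later function          -> FW_MSG_CODE_DRV_LOAD_FUNCTION
 */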
2373
2374/* mark PMF if applicable */
2375static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2376{
2377 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2378 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2379 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2380 bp->port.pmf = 1;
2381 /* We need the barrier to ensure the ordering between the
2382 * writing to bp->port.pmf here and reading it from the
2383 * bnx2x_periodic_task().
2384 */
2385 smp_mb();
2386 } else {
2387 bp->port.pmf = 0;
2388 }
2389
2390 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2391}
2392
2393static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2394{
2395 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2396 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2397 (bp->common.shmem2_base)) {
2398 if (SHMEM2_HAS(bp, dcc_support))
2399 SHMEM2_WR(bp, dcc_support,
2400 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2401 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2402 if (SHMEM2_HAS(bp, afex_driver_support))
2403 SHMEM2_WR(bp, afex_driver_support,
2404 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2405 }
2406
2407 /* Set AFEX default VLAN tag to an invalid value */
2408 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002409}
2410
Eric Dumazet1191cb82012-04-27 21:39:21 +00002411/**
2412 * bnx2x_bz_fp - zero content of the fastpath structure.
2413 *
2414 * @bp: driver handle
2415 * @index: fastpath index to be zeroed
2416 *
 2417 * Makes sure the contents of the bp->fp[index].napi are kept
2418 * intact.
2419 */
2420static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2421{
2422 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002423 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002424 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002425 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002426
Eric Dumazet1191cb82012-04-27 21:39:21 +00002427 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002428 if (fp->tpa_info)
2429 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2430 sizeof(struct bnx2x_agg_info));
2431 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002432
2433 /* Restore the NAPI object as it has been already initialized */
2434 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002435 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002436 fp->bp = bp;
2437 fp->index = index;
2438 if (IS_ETH_FP(fp))
2439 fp->max_cos = bp->max_cos;
2440 else
2441 /* Special queues support only one CoS */
2442 fp->max_cos = 1;
2443
Merav Sicron65565882012-06-19 07:48:26 +00002444 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002445 if (IS_FCOE_FP(fp))
2446 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002447 if (IS_ETH_FP(fp))
2448 for_each_cos_in_tx_queue(fp, cos)
2449 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2450 BNX2X_NUM_ETH_QUEUES(bp) + index];
2451
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002452 /* set the tpa flag for each queue. The tpa flag determines the queue's
Eric Dumazet1191cb82012-04-27 21:39:21 +00002453 * minimal size, so it must be set prior to queue memory allocation
2454 */
2455 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2456 (bp->flags & GRO_ENABLE_FLAG &&
2457 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2458 if (bp->flags & TPA_ENABLE_FLAG)
2459 fp->mode = TPA_MODE_LRO;
2460 else if (bp->flags & GRO_ENABLE_FLAG)
2461 fp->mode = TPA_MODE_GRO;
2462
Eric Dumazet1191cb82012-04-27 21:39:21 +00002463 /* We don't want TPA on an FCoE L2 ring */
2464 if (IS_FCOE_FP(fp))
2465 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002466}
2467
2468int bnx2x_load_cnic(struct bnx2x *bp)
2469{
2470 int i, rc, port = BP_PORT(bp);
2471
2472 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2473
2474 mutex_init(&bp->cnic_mutex);
2475
Ariel Eliorad5afc82013-01-01 05:22:26 +00002476 if (IS_PF(bp)) {
2477 rc = bnx2x_alloc_mem_cnic(bp);
2478 if (rc) {
2479 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2480 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2481 }
Merav Sicron55c11942012-11-07 00:45:48 +00002482 }
2483
2484 rc = bnx2x_alloc_fp_mem_cnic(bp);
2485 if (rc) {
2486 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2487 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2488 }
2489
2490 /* Update the number of queues with the cnic queues */
2491 rc = bnx2x_set_real_num_queues(bp, 1);
2492 if (rc) {
2493 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2494 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2495 }
2496
2497 /* Add all CNIC NAPI objects */
2498 bnx2x_add_all_napi_cnic(bp);
2499 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2500 bnx2x_napi_enable_cnic(bp);
2501
2502 rc = bnx2x_init_hw_func_cnic(bp);
2503 if (rc)
2504 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2505
2506 bnx2x_nic_init_cnic(bp);
2507
Ariel Eliorad5afc82013-01-01 05:22:26 +00002508 if (IS_PF(bp)) {
2509 /* Enable Timer scan */
2510 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002511
Ariel Eliorad5afc82013-01-01 05:22:26 +00002512 /* setup cnic queues */
2513 for_each_cnic_queue(bp, i) {
2514 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2515 if (rc) {
2516 BNX2X_ERR("Queue setup failed\n");
2517 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2518 }
Merav Sicron55c11942012-11-07 00:45:48 +00002519 }
2520 }
2521
2522 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002523 bnx2x_set_rx_mode_inner(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002524
2525 /* re-read iscsi info */
2526 bnx2x_get_iscsi_info(bp);
2527 bnx2x_setup_cnic_irq_info(bp);
2528 bnx2x_setup_cnic_info(bp);
2529 bp->cnic_loaded = true;
2530 if (bp->state == BNX2X_STATE_OPEN)
2531 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2532
Merav Sicron55c11942012-11-07 00:45:48 +00002533 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2534
2535 return 0;
2536
2537#ifndef BNX2X_STOP_ON_ERROR
2538load_error_cnic2:
2539 /* Disable Timer scan */
2540 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2541
2542load_error_cnic1:
2543 bnx2x_napi_disable_cnic(bp);
2544 /* Update the number of queues without the cnic queues */
Yuval Mintzd9d81862013-09-23 10:12:53 +03002545 if (bnx2x_set_real_num_queues(bp, 0))
Merav Sicron55c11942012-11-07 00:45:48 +00002546 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2547load_error_cnic0:
2548 BNX2X_ERR("CNIC-related load failed\n");
2549 bnx2x_free_fp_mem_cnic(bp);
2550 bnx2x_free_mem_cnic(bp);
2551 return rc;
2552#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002553}
2554
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002555/* must be called with rtnl_lock */
2556int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2557{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002558 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002559 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002560
Merav Sicron55c11942012-11-07 00:45:48 +00002561 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2562 DP(NETIF_MSG_IFUP,
2563 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2564
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002565#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002566 if (unlikely(bp->panic)) {
2567 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002568 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002569 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002570#endif
2571
2572 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2573
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002574 /* zero the structure w/o any lock, before SP handler is initialized */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002575 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2576 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2577 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002578
Ariel Eliorad5afc82013-01-01 05:22:26 +00002579 if (IS_PF(bp))
2580 /* must be called before memory allocation and HW init */
2581 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002582
Ariel Elior6383c0b2011-07-14 08:31:57 +00002583 /*
2584 * Zero fastpath structures while preserving invariants such as napi,
2585 * which is allocated only once, plus the fp index, max_cos and bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002586 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002587 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002588 DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002589 for_each_queue(bp, i)
2590 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002591 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2592 bp->num_cnic_queues) *
2593 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002594
Merav Sicron55c11942012-11-07 00:45:48 +00002595 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002596
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002597 /* Set the receive queues buffer size */
2598 bnx2x_set_rx_buf_size(bp);
2599
Ariel Eliorad5afc82013-01-01 05:22:26 +00002600 if (IS_PF(bp)) {
2601 rc = bnx2x_alloc_mem(bp);
2602 if (rc) {
2603 BNX2X_ERR("Unable to allocate bp memory\n");
2604 return rc;
2605 }
2606 }
2607
Ariel Eliorad5afc82013-01-01 05:22:26 +00002608 /* needs to be done after alloc mem, since it's self-adjusting to the
2609 * amount of memory available for RSS queues
2610 */
2611 rc = bnx2x_alloc_fp_mem(bp);
2612 if (rc) {
2613 BNX2X_ERR("Unable to allocate memory for fps\n");
2614 LOAD_ERROR_EXIT(bp, load_error0);
2615 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002616
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002617 /* Allocate memory for FW statistics */
2618 if (bnx2x_alloc_fw_stats_mem(bp))
2619 LOAD_ERROR_EXIT(bp, load_error0);
2620
Ariel Elior8d9ac292013-01-01 05:22:27 +00002621 /* request pf to initialize status blocks */
2622 if (IS_VF(bp)) {
2623 rc = bnx2x_vfpf_init(bp);
2624 if (rc)
2625 LOAD_ERROR_EXIT(bp, load_error0);
2626 }
2627
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002628 /* As long as bnx2x_alloc_mem() may update
2629 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002630 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002631 */
Merav Sicron55c11942012-11-07 00:45:48 +00002632 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002633 if (rc) {
2634 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002635 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002636 }
2637
Ariel Elior6383c0b2011-07-14 08:31:57 +00002638 /* configure multi cos mappings in kernel.
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002639 * this configuration may be overridden by a multi class queue
2640 * discipline or by a dcbx negotiation result.
Ariel Elior6383c0b2011-07-14 08:31:57 +00002641 */
2642 bnx2x_setup_tc(bp->dev, bp->max_cos);
2643
Merav Sicron26614ba2012-08-27 03:26:19 +00002644 /* Add all NAPI objects */
2645 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002646 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002647 bnx2x_napi_enable(bp);
2648
Ariel Eliorad5afc82013-01-01 05:22:26 +00002649 if (IS_PF(bp)) {
2650 /* set pf load just before approaching the MCP */
2651 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002652
Ariel Eliorad5afc82013-01-01 05:22:26 +00002653 /* if mcp exists send load request and analyze response */
2654 if (!BP_NOMCP(bp)) {
2655 /* attempt to load pf */
2656 rc = bnx2x_nic_load_request(bp, &load_code);
2657 if (rc)
2658 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002659
Ariel Eliorad5afc82013-01-01 05:22:26 +00002660 /* what did mcp say? */
Yuval Mintz91ebb922013-12-26 09:57:07 +02002661 rc = bnx2x_compare_fw_ver(bp, load_code, true);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002662 if (rc) {
2663 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002664 LOAD_ERROR_EXIT(bp, load_error2);
2665 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002666 } else {
2667 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002668 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002669
Ariel Eliorad5afc82013-01-01 05:22:26 +00002670 /* mark pmf if applicable */
2671 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002672
Ariel Eliorad5afc82013-01-01 05:22:26 +00002673 /* Init Function state controlling object */
2674 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002675
Ariel Eliorad5afc82013-01-01 05:22:26 +00002676 /* Initialize HW */
2677 rc = bnx2x_init_hw(bp, load_code);
2678 if (rc) {
2679 BNX2X_ERR("HW init failed, aborting\n");
2680 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2681 LOAD_ERROR_EXIT(bp, load_error2);
2682 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002683 }
2684
Yuval Mintzecf01c22013-04-22 02:53:03 +00002685 bnx2x_pre_irq_nic_init(bp);
2686
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002687 /* Connect to IRQs */
2688 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002689 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002690 BNX2X_ERR("setup irqs failed\n");
2691 if (IS_PF(bp))
2692 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002693 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002694 }
2695
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002696 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002697 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002698 /* Setup NIC internals and enable interrupts */
2699 bnx2x_post_irq_nic_init(bp, load_code);
2700
Ariel Eliorad5afc82013-01-01 05:22:26 +00002701 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002702 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002703
Ariel Eliorad5afc82013-01-01 05:22:26 +00002704 /* Set AFEX default VLAN tag to an invalid value */
2705 bp->afex_def_vlan_tag = -1;
2706 bnx2x_nic_load_afex_dcc(bp, load_code);
2707 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2708 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002709 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002710 BNX2X_ERR("Function start failed!\n");
2711 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2712
Merav Sicron55c11942012-11-07 00:45:48 +00002713 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002714 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002715
Ariel Eliorad5afc82013-01-01 05:22:26 +00002716 /* Send LOAD_DONE command to MCP */
2717 if (!BP_NOMCP(bp)) {
2718 load_code = bnx2x_fw_command(bp,
2719 DRV_MSG_CODE_LOAD_DONE, 0);
2720 if (!load_code) {
2721 BNX2X_ERR("MCP response failure, aborting\n");
2722 rc = -EBUSY;
2723 LOAD_ERROR_EXIT(bp, load_error3);
2724 }
2725 }
2726
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002727 /* initialize FW coalescing state machines in RAM */
2728 bnx2x_update_coalesce(bp);
Ariel Elior60cad4e2013-09-04 14:09:22 +03002729 }
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002730
Ariel Elior60cad4e2013-09-04 14:09:22 +03002731 /* setup the leading queue */
2732 rc = bnx2x_setup_leading(bp);
2733 if (rc) {
2734 BNX2X_ERR("Setup leading failed!\n");
2735 LOAD_ERROR_EXIT(bp, load_error3);
2736 }
2737
2738 /* set up the rest of the queues */
2739 for_each_nondefault_eth_queue(bp, i) {
2740 if (IS_PF(bp))
2741 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2742 else /* VF */
2743 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002744 if (rc) {
Ariel Elior60cad4e2013-09-04 14:09:22 +03002745 BNX2X_ERR("Queue %d setup failed\n", i);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002746 LOAD_ERROR_EXIT(bp, load_error3);
2747 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002748 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002749
Ariel Elior60cad4e2013-09-04 14:09:22 +03002750 /* setup rss */
2751 rc = bnx2x_init_rss(bp);
2752 if (rc) {
2753 BNX2X_ERR("PF RSS init failed\n");
2754 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002755 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002756
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002757 /* Now when Clients are configured we are ready to work */
2758 bp->state = BNX2X_STATE_OPEN;
2759
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002760 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002761 if (IS_PF(bp))
2762 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002763 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002764 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2765 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002766 if (rc) {
2767 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002768 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002769 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002770
Ariel Eliorad5afc82013-01-01 05:22:26 +00002771 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002772 bnx2x_update_max_mf_config(bp, bp->pending_max);
2773 bp->pending_max = 0;
2774 }
2775
Ariel Eliorad5afc82013-01-01 05:22:26 +00002776 if (bp->port.pmf) {
2777 rc = bnx2x_initial_phy_init(bp, load_mode);
2778 if (rc)
2779 LOAD_ERROR_EXIT(bp, load_error3);
2780 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002781 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002782
2783 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002784
2785 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002786 bnx2x_set_rx_mode_inner(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002787
2788 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002789 switch (load_mode) {
2790 case LOAD_NORMAL:
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002791 /* Tx queue should be only re-enabled */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002792 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002793 break;
2794
2795 case LOAD_OPEN:
2796 netif_tx_start_all_queues(bp->dev);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002797 smp_mb__after_atomic();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002798 break;
2799
2800 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002801 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002802 bp->state = BNX2X_STATE_DIAG;
2803 break;
2804
2805 default:
2806 break;
2807 }
2808
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002809 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002810 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002811 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002812 bnx2x__link_status_update(bp);
2813
2814 /* start the timer */
2815 mod_timer(&bp->timer, jiffies + bp->current_interval);
2816
Merav Sicron55c11942012-11-07 00:45:48 +00002817 if (CNIC_ENABLED(bp))
2818 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002819
Yuval Mintz42f82772014-03-23 18:12:23 +02002820 if (IS_PF(bp))
2821 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2822
Ariel Eliorad5afc82013-01-01 05:22:26 +00002823 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2824 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002825 u32 val;
2826 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2827 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2828 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2829 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2830 }
2831
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002832 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002833 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002834 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002835 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002836 return -EBUSY;
2837 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002838
Barak Witkowski98768792012-06-19 07:48:31 +00002839 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2840 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2841 bnx2x_dcbx_init(bp, false);
2842
Merav Sicron55c11942012-11-07 00:45:48 +00002843 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2844
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002845 return 0;
2846
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002847#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002848load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002849 if (IS_PF(bp)) {
2850 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002851
Ariel Eliorad5afc82013-01-01 05:22:26 +00002852 /* Clean queueable objects */
2853 bnx2x_squeeze_objects(bp);
2854 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002855
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002856 /* Free SKBs, SGEs, TPA pool and driver internals */
2857 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002858 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002859 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002860
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002861 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002862 bnx2x_free_irq(bp);
2863load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002864 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002865 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2866 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2867 }
2868
2869 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002870load_error1:
2871 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002872 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002873
Ariel Elior889b9af2012-01-26 06:01:51 +00002874 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002875 if (IS_PF(bp))
2876 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002877load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002878 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkove3ed4ea2013-10-27 13:07:00 +02002879 bnx2x_free_fp_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002880 bnx2x_free_mem(bp);
2881
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002882 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002883#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002884}
2885
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002886int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002887{
2888 int rc = 0, cos, i;
2889
2890 /* Wait until tx fastpath tasks complete */
2891 for_each_tx_queue(bp, i) {
2892 struct bnx2x_fastpath *fp = &bp->fp[i];
2893
2894 for_each_cos_in_tx_queue(fp, cos)
2895 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2896 if (rc)
2897 return rc;
2898 }
2899 return 0;
2900}
2901
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002902/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002903int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002904{
2905 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002906 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002907
Merav Sicron55c11942012-11-07 00:45:48 +00002908 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2909
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002910 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002911 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002912 u32 val;
2913 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2914 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2915 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2916 }
2917
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002918 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002919 (bp->state == BNX2X_STATE_CLOSED ||
2920 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002921 /* We can get here if the driver has been unloaded
2922 * during parity error recovery and is either waiting for a
2923 * leader to complete or for other functions to unload and
2924 * then ifdown has been issued. In this case we want to
2925 * unload and let other functions to complete a recovery
2926 * process.
2927 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002928 bp->recovery_state = BNX2X_RECOVERY_DONE;
2929 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002930 bnx2x_release_leader_lock(bp);
2931 smp_mb();
2932
Merav Sicron51c1a582012-03-18 10:33:38 +00002933 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2934 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002935 return -EINVAL;
2936 }
2937
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002938 /* Nothing to do during unload if the previous bnx2x_nic_load()
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002939 * did not complete successfully - all resources are released.
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002940 *
2941 * We can get here only after an unsuccessful ndo_* callback, during
2942 * which the dev->IFF_UP flag is still on.
2943 */
2944 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2945 return 0;
2946
2947 /* It's important to set the bp->state to a value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002948 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2949 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2950 */
2951 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2952 smp_mb();
2953
Ariel Elior78c3bcc2013-06-20 17:39:08 +03002954 /* indicate to VFs that the PF is going down */
2955 bnx2x_iov_channel_down(bp);
2956
Merav Sicron55c11942012-11-07 00:45:48 +00002957 if (CNIC_LOADED(bp))
2958 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2959
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002960 /* Stop Tx */
2961 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002962 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002963
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002964 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002965
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002966 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002967
Ariel Eliorad5afc82013-01-01 05:22:26 +00002968 if (IS_PF(bp)) {
2969 /* Set ALWAYS_ALIVE bit in shmem */
2970 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2971 bnx2x_drv_pulse(bp);
2972 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2973 bnx2x_save_statistics(bp);
2974 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002975
Ariel Eliorad5afc82013-01-01 05:22:26 +00002976 /* wait till consumers catch up with producers in all queues */
2977 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002978
Ariel Elior9b176b62013-01-01 05:22:28 +00002979 /* if VF, indicate to the PF that this function is going down (the PF
2980 * will delete sp elements and clear initializations)
2981 */
2982 if (IS_VF(bp))
2983 bnx2x_vfpf_close_vf(bp);
2984 else if (unload_mode != UNLOAD_RECOVERY)
2985 /* if this is a normal/close unload, need to clean up the chip */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002986 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002987 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002988 /* Send the UNLOAD_REQUEST to the MCP */
2989 bnx2x_send_unload_req(bp, unload_mode);
2990
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002991 /* Prevent transactions to the host from the functions on the
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002992 * engine that doesn't reset global blocks in case of a global
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002993 * attention, once global blocks are reset and gates are opened
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002994 * (that engine's leader will perform the recovery
2995 * last).
2996 */
2997 if (!CHIP_IS_E1x(bp))
2998 bnx2x_pf_disable(bp);
2999
3000 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003001 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00003002 /* Delete all NAPI objects */
3003 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003004 if (CNIC_LOADED(bp))
3005 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003006 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003007 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003008
3009 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00003010 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003011 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003012
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003013 /*
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003014 * At this stage no more interrupts will arrive so we may safely clean
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003015 * the queueable objects here in case they failed to get cleaned so far.
3016 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003017 if (IS_PF(bp))
3018 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003019
Vladislav Zolotarov79616892011-07-21 07:58:54 +00003020 /* There should be no more pending SP commands at this stage */
3021 bp->sp_state = 0;
3022
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003023 bp->port.pmf = 0;
3024
Dmitry Kravkova0d307b2013-11-17 08:59:26 +02003025 /* clear pending work in rtnl task */
3026 bp->sp_rtnl_state = 0;
3027 smp_mb();
3028
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003029 /* Free SKBs, SGEs, TPA pool and driver internals */
3030 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00003031 if (CNIC_LOADED(bp))
3032 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003033 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003034 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003035
Ariel Eliorad5afc82013-01-01 05:22:26 +00003036 bnx2x_free_fp_mem(bp);
3037 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00003038 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003039
Ariel Eliorad5afc82013-01-01 05:22:26 +00003040 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00003041 if (CNIC_LOADED(bp))
3042 bnx2x_free_mem_cnic(bp);
3043 }
Ariel Eliorb4cddbd2013-08-28 01:13:03 +03003044 bnx2x_free_mem(bp);
3045
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003046 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00003047 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003048
Yuval Mintz42f82772014-03-23 18:12:23 +02003049 /* Clear driver version indication in shmem */
3050 if (IS_PF(bp))
3051 bnx2x_update_mng_version(bp);
3052
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003053 /* Check if there are pending parity attentions. If there are - set
3054 * RECOVERY_IN_PROGRESS.
3055 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003056 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00003057 bnx2x_set_reset_in_progress(bp);
3058
3059 /* Set RESET_IS_GLOBAL if needed */
3060 if (global)
3061 bnx2x_set_reset_global(bp);
3062 }
3063
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003064 /* The last driver must disable "close the gate" if there is no
3065 * parity attention or "process kill" pending.
3066 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00003067 if (IS_PF(bp) &&
3068 !bnx2x_clear_pf_load(bp) &&
3069 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003070 bnx2x_disable_close_the_gate(bp);
3071
Merav Sicron55c11942012-11-07 00:45:48 +00003072 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3073
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003074 return 0;
3075}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003076
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003077int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3078{
3079 u16 pmcsr;
3080
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003081 /* If there is no power capability, silently succeed */
Jon Mason29ed74c2013-09-11 11:22:39 -07003082 if (!bp->pdev->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003083 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003084 return 0;
3085 }
3086
Jon Mason29ed74c2013-09-11 11:22:39 -07003087 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003088
3089 switch (state) {
3090 case PCI_D0:
Jon Mason29ed74c2013-09-11 11:22:39 -07003091 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003092 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3093 PCI_PM_CTRL_PME_STATUS));
3094
3095 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3096 /* delay required during transition out of D3hot */
3097 msleep(20);
3098 break;
3099
3100 case PCI_D3hot:
3101 /* If there are other clients above, don't
3102 shut down the power */
3103 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3104 return 0;
3105 /* Don't shut down the power for emulation and FPGA */
3106 if (CHIP_REV_IS_SLOW(bp))
3107 return 0;
3108
3109 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3110 pmcsr |= 3;
3111
3112 if (bp->wol)
3113 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3114
Jon Mason29ed74c2013-09-11 11:22:39 -07003115 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003116 pmcsr);
3117
3118 /* No more memory access after this point until
3119 * device is brought back to D0.
3120 */
3121 break;
3122
3123 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003124 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003125 return -EINVAL;
3126 }
3127 return 0;
3128}
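
/*
 * Illustrative sketch, not part of the driver: the D3hot branch above is a
 * read-modify-write of the 16-bit PMCSR register - clear the 2-bit power
 * state field, set it to 3 (D3hot) and optionally set PME enable. Standalone
 * demonstration; the mask values are assumed to mirror the PCI PM capability
 * layout (as in linux/pci_regs.h).
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_PM_CTRL_STATE_MASK	0x0003	/* power state field, D0..D3hot */
#define EX_PM_CTRL_PME_ENABLE	0x0100	/* PME_En bit */

static uint16_t example_pmcsr_to_d3hot(uint16_t pmcsr, bool wol)
{
	pmcsr &= ~EX_PM_CTRL_STATE_MASK;	/* drop the current state */
	pmcsr |= 3;				/* request D3hot */
	if (wol)
		pmcsr |= EX_PM_CTRL_PME_ENABLE;	/* keep wake events armed */
	return pmcsr;
}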
3129
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003130/*
3131 * net_device service functions
3132 */
stephen hemmingera8f47eb2014-01-09 22:20:11 -08003133static int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003134{
3135 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003136 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003137 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3138 napi);
3139 struct bnx2x *bp = fp->bp;
3140
3141 while (1) {
3142#ifdef BNX2X_STOP_ON_ERROR
3143 if (unlikely(bp->panic)) {
3144 napi_complete(napi);
3145 return 0;
3146 }
3147#endif
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003148 if (!bnx2x_fp_lock_napi(fp))
3149 return work_done;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003150
Ariel Elior6383c0b2011-07-14 08:31:57 +00003151 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003152 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3153 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003154
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003155 if (bnx2x_has_rx_work(fp)) {
3156 work_done += bnx2x_rx_int(fp, budget - work_done);
3157
3158 /* must not complete if we consumed full budget */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003159 if (work_done >= budget) {
3160 bnx2x_fp_unlock_napi(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003161 break;
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003162 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003163 }
3164
3165 /* Fall out from the NAPI loop if needed */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003166 if (!bnx2x_fp_unlock_napi(fp) &&
3167 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003168
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003169 /* No need to update SB for FCoE L2 ring as long as
3170 * it's connected to the default SB and the SB
3171 * has been updated when NAPI was scheduled.
3172 */
3173 if (IS_FCOE_FP(fp)) {
3174 napi_complete(napi);
3175 break;
3176 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003177 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003178 /* bnx2x_has_rx_work() reads the status block,
3179 * thus we need to ensure that status block indices
3180 * have been actually read (bnx2x_update_fpsb_idx)
3181 * prior to this check (bnx2x_has_rx_work) so that
3182 * we won't write the "newer" value of the status block
3183 * to IGU (if there was a DMA right after
3184 * bnx2x_has_rx_work and if there is no rmb, the memory
3185 * reading (bnx2x_update_fpsb_idx) may be postponed
3186 * to right before bnx2x_ack_sb). In this case there
3187 * will never be another interrupt until there is
3188 * another update of the status block, while there
3189 * is still unhandled work.
3190 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003191 rmb();
3192
3193 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3194 napi_complete(napi);
3195 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003196 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003197 "Update index to %d\n", fp->fp_hc_idx);
3198 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3199 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003200 IGU_INT_ENABLE, 1);
3201 break;
3202 }
3203 }
3204 }
3205
3206 return work_done;
3207}
3208
Cong Wange0d10952013-08-01 11:10:25 +08003209#ifdef CONFIG_NET_RX_BUSY_POLL
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003210/* must be called with local_bh_disable()d */
3211int bnx2x_low_latency_recv(struct napi_struct *napi)
3212{
3213 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3214 napi);
3215 struct bnx2x *bp = fp->bp;
3216 int found = 0;
3217
3218 if ((bp->state == BNX2X_STATE_CLOSED) ||
3219 (bp->state == BNX2X_STATE_ERROR) ||
3220 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3221 return LL_FLUSH_FAILED;
3222
3223 if (!bnx2x_fp_lock_poll(fp))
3224 return LL_FLUSH_BUSY;
3225
Dmitry Kravkov75b29452013-06-19 01:36:05 +03003226 if (bnx2x_has_rx_work(fp))
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003227 found = bnx2x_rx_int(fp, 4);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003228
3229 bnx2x_fp_unlock_poll(fp);
3230
3231 return found;
3232}
3233#endif
3234
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003235/* we split the first BD into headers and data BDs
3236 * to ease the pain of our fellow microcode engineers;
3237 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003238 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003239static u16 bnx2x_tx_split(struct bnx2x *bp,
3240 struct bnx2x_fp_txdata *txdata,
3241 struct sw_tx_bd *tx_buf,
3242 struct eth_tx_start_bd **tx_bd, u16 hlen,
3243 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003244{
3245 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3246 struct eth_tx_bd *d_tx_bd;
3247 dma_addr_t mapping;
3248 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3249
3250 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003251 h_tx_bd->nbytes = cpu_to_le16(hlen);
3252
Dmitry Kravkov91226792013-03-11 05:17:52 +00003253 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3254 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003255
3256 /* now get a new data BD
3257 * (after the pbd) and fill it */
3258 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003259 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003260
3261 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3262 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3263
3264 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3265 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3266 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3267
3268 /* this marks the BD as one that has no individual mapping */
3269 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3270
3271 DP(NETIF_MSG_TX_QUEUED,
3272 "TSO split data size is %d (%x:%x)\n",
3273 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3274
3275 /* update tx_bd */
3276 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3277
3278 return bd_prod;
3279}
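
/*
 * Illustrative sketch, not part of the driver: bnx2x_tx_split() keeps one
 * DMA mapping and carves it into two contiguous descriptors - the header BD
 * keeps the first hlen bytes at 'mapping', the new data BD covers the
 * remaining old_len - hlen bytes at 'mapping + hlen'. Standalone version
 * over a hypothetical descriptor type:
 */
#include <stdint.h>

struct ex_desc {
	uint64_t addr;		/* bus address covered by this descriptor */
	uint16_t nbytes;	/* number of bytes covered */
};

static void example_split_desc(struct ex_desc *hdr, struct ex_desc *data,
			       uint16_t hlen)
{
	uint16_t old_len = hdr->nbytes;

	hdr->nbytes = hlen;		/* first part: just the headers */
	data->addr = hdr->addr + hlen;	/* same mapping, offset by hlen */
	data->nbytes = old_len - hlen;	/* second part: the payload */
}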
3280
Yuval Mintz86564c32013-01-23 03:21:50 +00003281#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3282#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003283static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003284{
Yuval Mintz86564c32013-01-23 03:21:50 +00003285 __sum16 tsum = (__force __sum16) csum;
3286
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003287 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003288 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3289 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003290
3291 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003292 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3293 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003294
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003295 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003296}
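
/*
 * Illustrative sketch, not part of the driver: bnx2x_csum_fix() adjusts a
 * ones'-complement checksum when the hardware summed from a slightly wrong
 * offset, by folding the partial sum of the extra bytes in or out. The
 * standalone helpers below (hypothetical names) show the "remove an
 * even-length prefix" direction; the driver also handles the opposite sign.
 */
#include <stddef.h>
#include <stdint.h>

/* unfolded 16-bit ones'-complement sum of a buffer (big-endian word order) */
static uint32_t ex_csum_partial(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
	if (len & 1)
		sum += (uint32_t)(buf[len - 1] << 8);
	return sum;
}

static uint16_t ex_csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/*
 * Given the folded sum over buf[0..len), return the folded sum over
 * buf[fix..len) without re-walking the tail (fix must be even).
 * Ones'-complement subtraction is "add the complement".
 */
static uint16_t ex_csum_drop_prefix(uint16_t full_sum,
				    const uint8_t *buf, size_t fix)
{
	uint16_t prefix = ex_csum_fold(ex_csum_partial(buf, fix));

	return ex_csum_fold((uint32_t)full_sum + (uint16_t)~prefix);
}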
3297
Dmitry Kravkov91226792013-03-11 05:17:52 +00003298static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003299{
3300 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003301 __u8 prot = 0;
3302 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003303
3304 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003305 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003306
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003307 protocol = vlan_get_protocol(skb);
3308 if (protocol == htons(ETH_P_IPV6)) {
3309 rc = XMIT_CSUM_V6;
3310 prot = ipv6_hdr(skb)->nexthdr;
3311 } else {
3312 rc = XMIT_CSUM_V4;
3313 prot = ip_hdr(skb)->protocol;
3314 }
3315
3316 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3317 if (inner_ip_hdr(skb)->version == 6) {
3318 rc |= XMIT_CSUM_ENC_V6;
3319 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003320 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003321 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003322 rc |= XMIT_CSUM_ENC_V4;
3323 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003324 rc |= XMIT_CSUM_TCP;
3325 }
3326 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003327 if (prot == IPPROTO_TCP)
3328 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003329
Eric Dumazet36a8f392013-09-29 01:21:32 -07003330 if (skb_is_gso(skb)) {
3331 if (skb_is_gso_v6(skb)) {
3332 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3333 if (rc & XMIT_CSUM_ENC)
3334 rc |= XMIT_GSO_ENC_V6;
3335 } else {
3336 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3337 if (rc & XMIT_CSUM_ENC)
3338 rc |= XMIT_GSO_ENC_V4;
3339 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003340 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003341
3342 return rc;
3343}
3344
3345#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3346/* check if a packet requires linearization (the packet is too fragmented);
3347 no need to check fragmentation if page size > 8K (there will be no
3348 violation of FW restrictions) */
3349static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3350 u32 xmit_type)
3351{
3352 int to_copy = 0;
3353 int hlen = 0;
3354 int first_bd_sz = 0;
3355
3356 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3357 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3358
3359 if (xmit_type & XMIT_GSO) {
3360 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3361 /* Check if LSO packet needs to be copied:
3362 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3363 int wnd_size = MAX_FETCH_BD - 3;
3364 /* Number of windows to check */
3365 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3366 int wnd_idx = 0;
3367 int frag_idx = 0;
3368 u32 wnd_sum = 0;
3369
3370 /* Headers length */
3371 hlen = (int)(skb_transport_header(skb) - skb->data) +
3372 tcp_hdrlen(skb);
3373
3374 /* Amount of data (w/o headers) on linear part of SKB*/
3375 first_bd_sz = skb_headlen(skb) - hlen;
3376
3377 wnd_sum = first_bd_sz;
3378
3379 /* Calculate the first sum - it's special */
3380 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3381 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003382 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003383
3384 /* If there was data in the linear part of the skb - check it */
3385 if (first_bd_sz > 0) {
3386 if (unlikely(wnd_sum < lso_mss)) {
3387 to_copy = 1;
3388 goto exit_lbl;
3389 }
3390
3391 wnd_sum -= first_bd_sz;
3392 }
3393
3394 /* Others are easier: run through the frag list and
3395 check all windows */
3396 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3397 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003398 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003399
3400 if (unlikely(wnd_sum < lso_mss)) {
3401 to_copy = 1;
3402 break;
3403 }
3404 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003405 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003406 }
3407 } else {
3408 /* a non-LSO packet that is too fragmented should always
3409 be linearized */
3410 to_copy = 1;
3411 }
3412 }
3413
3414exit_lbl:
3415 if (unlikely(to_copy))
3416 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003417 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003418 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3419 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3420
3421 return to_copy;
3422}
3423#endif
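
/*
 * Illustrative sketch, not part of the driver: the check above slides a
 * window of wnd_size fragment lengths across the packet and asks for
 * linearization if any window carries less than one MSS of data.
 * Standalone version over a plain array of fragment lengths (hypothetical
 * helper; the special handling of the linear head data is omitted).
 */
#include <stdbool.h>

static bool example_needs_linearization(const unsigned int *frag_len,
					int nfrags, unsigned int mss,
					int wnd_size)
{
	unsigned int wnd_sum = 0;
	int i;

	if (nfrags < wnd_size)		/* few enough BDs, nothing to check */
		return false;

	for (i = 0; i < wnd_size; i++)	/* sum of the first window */
		wnd_sum += frag_len[i];
	if (wnd_sum < mss)
		return true;

	for (i = wnd_size; i < nfrags; i++) {	/* slide by one fragment */
		wnd_sum += frag_len[i];
		wnd_sum -= frag_len[i - wnd_size];
		if (wnd_sum < mss)
			return true;
	}
	return false;
}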
3424
Dmitry Kravkov91226792013-03-11 05:17:52 +00003425static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3426 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003427{
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003428 struct ipv6hdr *ipv6;
3429
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003430 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3431 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3432 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003433
3434 if (xmit_type & XMIT_GSO_ENC_V6)
3435 ipv6 = inner_ipv6_hdr(skb);
3436 else if (xmit_type & XMIT_GSO_V6)
3437 ipv6 = ipv6_hdr(skb);
3438 else
3439 ipv6 = NULL;
3440
3441 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003442 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003443}
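
/*
 * Illustrative sketch, not part of the driver: parsing_data above is a packed
 * 32-bit word - each value is shifted to its field offset and masked before
 * being OR-ed in (the gso_size goes into the LSO_MSS field here). A generic
 * standalone helper for the same shift-and-mask pattern:
 */
#include <stdint.h>

static inline uint32_t example_set_field(uint32_t word, uint32_t val,
					 unsigned int shift, uint32_t mask)
{
	return (word & ~mask) | ((val << shift) & mask);
}

/* e.g. packing an MSS of 1460 into a hypothetical bits [13:0] field:
 *	parsing = example_set_field(parsing, 1460, 0, 0x3fff);
 */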
3444
3445/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003446 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003447 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003448 * @skb: packet skb
3449 * @pbd: parse BD
3450 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003451 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003452static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3453 struct eth_tx_parse_bd_e1x *pbd,
Yuval Mintz057cf652013-05-19 04:41:01 +00003454 struct eth_tx_start_bd *tx_start_bd,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003455 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003456{
3457 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003458 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003459 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003460
3461 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003462 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003463 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003464 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3465 ip_hdr(skb)->daddr,
3466 0, IPPROTO_TCP, 0));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003467
Yuval Mintz057cf652013-05-19 04:41:01 +00003468 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3469 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3470 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003471 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003472 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3473 &ipv6_hdr(skb)->daddr,
3474 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003475 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003476
Yuval Mintz86564c32013-01-23 03:21:50 +00003477 pbd->global_data |=
3478 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003479}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003480
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003481/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003482 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3483 *
3484 * @bp: driver handle
3485 * @skb: packet skb
3486 * @parsing_data: data to be updated
3487 * @xmit_type: xmit flags
3488 *
3489 * 57712/578xx related, when skb has encapsulation
3490 */
3491static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3492 u32 *parsing_data, u32 xmit_type)
3493{
3494 *parsing_data |=
3495 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3496 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3497 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3498
3499 if (xmit_type & XMIT_CSUM_TCP) {
3500 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3501 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3502 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3503
3504 return skb_inner_transport_header(skb) +
3505 inner_tcp_hdrlen(skb) - skb->data;
3506 }
3507
3508 /* We support checksum offload for TCP and UDP only.
3509 * No need to pass the UDP header length - it's a constant.
3510 */
3511 return skb_inner_transport_header(skb) +
3512 sizeof(struct udphdr) - skb->data;
3513}
3514
3515/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003516 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003517 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003518 * @bp: driver handle
3519 * @skb: packet skb
3520 * @parsing_data: data to be updated
3521 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003522 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003523 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003524 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003525static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3526 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003527{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003528 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003529 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003530 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3531 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003532
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003533 if (xmit_type & XMIT_CSUM_TCP) {
3534 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3535 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3536 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003537
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003538 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003539 }
3540 /* We support checksum offload for TCP and UDP only.
3541 * No need to pass the UDP header length - it's a constant.
3542 */
3543 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003544}
3545
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003546/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003547static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3548 struct eth_tx_start_bd *tx_start_bd,
3549 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003550{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003551 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3552
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003553 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003554 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003555
3556 if (!(xmit_type & XMIT_CSUM_TCP))
3557 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003558}
3559
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003560/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003561 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003562 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003563 * @bp: driver handle
3564 * @skb: packet skb
3565 * @pbd: parse BD to be updated
3566 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003567 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003568static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3569 struct eth_tx_parse_bd_e1x *pbd,
3570 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003571{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003572 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003573
3574 /* for now NS flag is not used in Linux */
3575 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003576 cpu_to_le16(hlen |
3577 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3578 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003579
3580 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003581 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003582
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003583 hlen += pbd->ip_hlen_w;
3584
3585 /* We support checksum offload for TCP and UDP only */
3586 if (xmit_type & XMIT_CSUM_TCP)
3587 hlen += tcp_hdrlen(skb) / 2;
3588 else
3589 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003590
3591 pbd->total_hlen_w = cpu_to_le16(hlen);
3592 hlen = hlen*2;
3593
3594 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003595 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003596
3597 } else {
3598 s8 fix = SKB_CS_OFF(skb); /* signed! */
3599
3600 DP(NETIF_MSG_TX_QUEUED,
3601 "hlen %d fix %d csum before fix %x\n",
3602 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3603
3604 /* HW bug: fixup the CSUM */
3605 pbd->tcp_pseudo_csum =
3606 bnx2x_csum_fix(skb_transport_header(skb),
3607 SKB_CS(skb), fix);
3608
3609 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3610 pbd->tcp_pseudo_csum);
3611 }
3612
3613 return hlen;
3614}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003615
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003616static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3617 struct eth_tx_parse_bd_e2 *pbd_e2,
3618 struct eth_tx_parse_2nd_bd *pbd2,
3619 u16 *global_data,
3620 u32 xmit_type)
3621{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003622 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003623 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003624
Dmitry Kravkove287a752013-03-21 15:38:24 +00003625 /* from outer IP to transport */
3626 hlen_w = (skb_inner_transport_header(skb) -
3627 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003628
3629 /* transport len */
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003630 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003631
Dmitry Kravkove287a752013-03-21 15:38:24 +00003632 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003633
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003634 /* outer IP header info */
3635 if (xmit_type & XMIT_CSUM_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003636 struct iphdr *iph = ip_hdr(skb);
Dmitry Kravkov1b4fc0e2013-07-11 15:48:21 +03003637 u32 csum = (__force u32)(~iph->check) -
3638 (__force u32)iph->tot_len -
3639 (__force u32)iph->frag_off;
Yuval Mintzc957d092013-06-25 08:50:11 +03003640
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003641 pbd2->fw_ip_csum_wo_len_flags_frag =
Yuval Mintzc957d092013-06-25 08:50:11 +03003642 bswab16(csum_fold((__force __wsum)csum));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003643 } else {
3644 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003645 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003646 }
3647
3648 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3649
3650 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3651
3652 if (xmit_type & XMIT_GSO_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003653 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003654
3655 pbd_e2->data.tunnel_data.pseudo_csum =
3656 bswab16(~csum_tcpudp_magic(
3657 inner_ip_hdr(skb)->saddr,
3658 inner_ip_hdr(skb)->daddr,
3659 0, IPPROTO_TCP, 0));
3660
3661 outerip_len = ip_hdr(skb)->ihl << 1;
3662 } else {
3663 pbd_e2->data.tunnel_data.pseudo_csum =
3664 bswab16(~csum_ipv6_magic(
3665 &inner_ipv6_hdr(skb)->saddr,
3666 &inner_ipv6_hdr(skb)->daddr,
3667 0, IPPROTO_TCP, 0));
3668 }
3669
3670 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3671
3672 *global_data |=
3673 outerip_off |
3674 (!!(xmit_type & XMIT_CSUM_V6) <<
3675 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3676 (outerip_len <<
3677 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3678 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3679 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003680
3681 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3682 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3683 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3684 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003685}
3686
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003687/* called with netif_tx_lock
3688 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3689 * netif_wake_queue()
3690 */
3691netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3692{
3693 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003694
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003695 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003696 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003697 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003698 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003699 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003700 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003701 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003702 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003703 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003704 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003705 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003706 dma_addr_t mapping;
3707 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3708 int i;
3709 u8 hlen = 0;
3710 __le16 pkt_size = 0;
3711 struct ethhdr *eth;
3712 u8 mac_type = UNICAST_ADDRESS;
3713
3714#ifdef BNX2X_STOP_ON_ERROR
3715 if (unlikely(bp->panic))
3716 return NETDEV_TX_BUSY;
3717#endif
3718
Ariel Elior6383c0b2011-07-14 08:31:57 +00003719 txq_index = skb_get_queue_mapping(skb);
3720 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003721
Merav Sicron55c11942012-11-07 00:45:48 +00003722 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003723
Merav Sicron65565882012-06-19 07:48:26 +00003724 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003725
3726 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003727 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003728 txq_index, fp_index, txdata_index); */
3729
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003730 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003731 DP(NETIF_MSG_TX_QUEUED,
3732 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003733 txdata->cid, fp_index, txdata_index, txdata, fp); */
3734
3735 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003736 skb_shinfo(skb)->nr_frags +
3737 BDS_PER_TX_PKT +
3738 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003739 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003740 if (txdata->tx_ring_size == 0) {
3741 struct bnx2x_eth_q_stats *q_stats =
3742 bnx2x_fp_qstats(bp, txdata->parent_fp);
3743 q_stats->driver_filtered_tx_pkt++;
3744 dev_kfree_skb(skb);
3745 return NETDEV_TX_OK;
3746 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003747 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3748 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003749 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003750
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003751 return NETDEV_TX_BUSY;
3752 }
3753
Merav Sicron51c1a582012-03-18 10:33:38 +00003754 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003755 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003756 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003757 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3758 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003759
3760 eth = (struct ethhdr *)skb->data;
3761
3762 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3763 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3764 if (is_broadcast_ether_addr(eth->h_dest))
3765 mac_type = BROADCAST_ADDRESS;
3766 else
3767 mac_type = MULTICAST_ADDRESS;
3768 }
3769
Dmitry Kravkov91226792013-03-11 05:17:52 +00003770#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003771 /* First, check if we need to linearize the skb (due to FW
3772 restrictions). No need to check fragmentation if page size > 8K
 3773	   (there will be no violation of FW restrictions) */
3774 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3775 /* Statistics of linearization */
3776 bp->lin_cnt++;
3777 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003778 DP(NETIF_MSG_TX_QUEUED,
3779 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003780 dev_kfree_skb_any(skb);
3781 return NETDEV_TX_OK;
3782 }
3783 }
3784#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003785 /* Map skb linear data for DMA */
3786 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3787 skb_headlen(skb), DMA_TO_DEVICE);
3788 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003789 DP(NETIF_MSG_TX_QUEUED,
3790 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003791 dev_kfree_skb_any(skb);
3792 return NETDEV_TX_OK;
3793 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003794 /*
3795 Please read carefully. First we use one BD which we mark as start,
3796 then we have a parsing info BD (used for TSO or xsum),
3797 and only then we have the rest of the TSO BDs.
3798 (don't forget to mark the last one as last,
3799 and to unmap only AFTER you write to the BD ...)
 3800	   And above all, all PBD sizes are in words - NOT DWORDS!
3801 */
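	/*
	 * Rough sketch of the resulting BD chain for one packet:
	 *
	 *   start BD -> parsing BD(s) -> [split header data BD] -> frag BDs
	 *
	 * nbd counts the start, parsing and data BDs; any "next page" BD
	 * crossed by the chain is accounted for just before the doorbell.
	 */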
3802
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003803	/* get the current packet producer now - it is advanced only just before
 3804	 * sending the packet, since mapping of pages may fail and cause the packet to be dropped
3805 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003806 pkt_prod = txdata->tx_pkt_prod;
3807 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003808
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003809 /* get a tx_buf and first BD
3810 * tx_start_bd may be changed during SPLIT,
3811 * but first_bd will always stay first
3812 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003813 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3814 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003815 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003816
3817 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003818
Dmitry Kravkov91226792013-03-11 05:17:52 +00003819 /* header nbd: indirectly zero other flags! */
3820 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003821
3822 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003823 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003824 tx_buf->skb = skb;
3825 tx_buf->flags = 0;
3826
3827 DP(NETIF_MSG_TX_QUEUED,
3828 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003829 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003830
Jesse Grosseab6d182010-10-20 13:56:03 +00003831 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003832 tx_start_bd->vlan_or_ethertype =
3833 cpu_to_le16(vlan_tx_tag_get(skb));
3834 tx_start_bd->bd_flags.as_bitfield |=
3835 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003836 } else {
3837 /* when transmitting in a vf, start bd must hold the ethertype
3838 * for fw to enforce it
3839 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003840 if (IS_VF(bp))
Ariel Eliordc1ba592013-01-01 05:22:30 +00003841 tx_start_bd->vlan_or_ethertype =
3842 cpu_to_le16(ntohs(eth->h_proto));
Dmitry Kravkov91226792013-03-11 05:17:52 +00003843 else
Ariel Eliordc1ba592013-01-01 05:22:30 +00003844 /* used by FW for packet accounting */
3845 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003846 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003847
Dmitry Kravkov91226792013-03-11 05:17:52 +00003848 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3849
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003850 /* turn on parsing and get a BD */
3851 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003852
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003853 if (xmit_type & XMIT_CSUM)
3854 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003855
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003856 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003857 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003858 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003859
3860 if (xmit_type & XMIT_CSUM_ENC) {
3861 u16 global_data = 0;
3862
3863 /* Set PBD in enc checksum offload case */
3864 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3865 &pbd_e2_parsing_data,
3866 xmit_type);
3867
3868 /* turn on 2nd parsing and get a BD */
3869 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3870
3871 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3872
3873 memset(pbd2, 0, sizeof(*pbd2));
3874
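			/* inner IP header offset from the start of the packet,
			 * in 16-bit words (hence the shift by one)
			 */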
3875 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3876 (skb_inner_network_header(skb) -
3877 skb->data) >> 1;
3878
3879 if (xmit_type & XMIT_GSO_ENC)
3880 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3881 &global_data,
3882 xmit_type);
3883
3884 pbd2->global_data = cpu_to_le16(global_data);
3885
 3886			/* add an additional parsing BD indication to the start BD */
3887 SET_FLAG(tx_start_bd->general_data,
3888 ETH_TX_START_BD_PARSE_NBDS, 1);
3889 /* set encapsulation flag in start BD */
3890 SET_FLAG(tx_start_bd->general_data,
3891 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3892 nbd++;
3893 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00003894 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003895 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3896 &pbd_e2_parsing_data,
3897 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003898 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00003899
Yuval Mintzbabe7232014-02-27 15:42:26 +02003900 /* Add the macs to the parsing BD if this is a vf or if
3901 * Tx Switching is enabled.
3902 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003903 if (IS_VF(bp)) {
3904 /* override GRE parameters in BD */
3905 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3906 &pbd_e2->data.mac_addr.src_mid,
3907 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003908 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003909
3910 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3911 &pbd_e2->data.mac_addr.dst_mid,
3912 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003913 eth->h_dest);
Yuval Mintzbabe7232014-02-27 15:42:26 +02003914 } else if (bp->flags & TX_SWITCHING) {
3915 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3916 &pbd_e2->data.mac_addr.dst_mid,
3917 &pbd_e2->data.mac_addr.dst_lo,
3918 eth->h_dest);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003919 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003920
3921 SET_FLAG(pbd_e2_parsing_data,
3922 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003923 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003924 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003925 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003926 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3927 /* Set PBD in checksum offload case */
3928 if (xmit_type & XMIT_CSUM)
3929 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003930
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003931 SET_FLAG(global_data,
3932 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3933 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003934 }
3935
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003936 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003937 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3938 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003939 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3940 pkt_size = tx_start_bd->nbytes;
3941
Merav Sicron51c1a582012-03-18 10:33:38 +00003942 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003943 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003944 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003945 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003946 tx_start_bd->bd_flags.as_bitfield,
3947 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003948
3949 if (xmit_type & XMIT_GSO) {
3950
3951 DP(NETIF_MSG_TX_QUEUED,
3952 "TSO packet len %d hlen %d total len %d tso size %d\n",
3953 skb->len, hlen, skb_headlen(skb),
3954 skb_shinfo(skb)->gso_size);
3955
3956 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3957
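		/* If the linear part of the skb extends past the parsed headers,
		 * split the first BD so the headers keep a BD of their own and
		 * the remaining linear data moves to an extra data BD.
		 */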
Dmitry Kravkov91226792013-03-11 05:17:52 +00003958 if (unlikely(skb_headlen(skb) > hlen)) {
3959 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003960 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3961 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003962 bd_prod);
3963 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003964 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003965 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3966 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003967 else
Yuval Mintz44dbc782013-06-03 02:59:57 +00003968 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003969 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003970
3971 /* Set the PBD's parsing_data field if not zero
3972 * (for the chips newer than 57711).
3973 */
3974 if (pbd_e2_parsing_data)
3975 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3976
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003977 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3978
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003979 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003980 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3981 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3982
Eric Dumazet9e903e02011-10-18 21:00:24 +00003983 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3984 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003985 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003986 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003987
Merav Sicron51c1a582012-03-18 10:33:38 +00003988 DP(NETIF_MSG_TX_QUEUED,
3989 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003990
3991 /* we need unmap all buffers already mapped
3992 * for this SKB;
3993 * first_bd->nbd need to be properly updated
3994 * before call to bnx2x_free_tx_pkt
3995 */
3996 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003997 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003998 TX_BD(txdata->tx_pkt_prod),
3999 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004000 return NETDEV_TX_OK;
4001 }
4002
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004003 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00004004 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004005 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00004006 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004007
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004008 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4009 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00004010 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4011 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004012 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004013
4014 DP(NETIF_MSG_TX_QUEUED,
4015 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4016 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4017 le16_to_cpu(tx_data_bd->nbytes));
4018 }
4019
4020 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4021
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004022 /* update with actual num BDs */
4023 first_bd->nbd = cpu_to_le16(nbd);
4024
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004025 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4026
4027 /* now send a tx doorbell, counting the next BD
4028 * if the packet contains or ends with it
4029 */
4030 if (TX_BD_POFF(bd_prod) < nbd)
4031 nbd++;
4032
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004033 /* total_pkt_bytes should be set on the first data BD if
4034 * it's not an LSO packet and there is more than one
4035 * data BD. In this case pkt_size is limited by an MTU value.
4036 * However we prefer to set it for an LSO packet (while we don't
 4037	 * have to) in order to save some CPU cycles in the non-LSO
 4038	 * case, which we care much more about.
4039 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004040 if (total_pkt_bd != NULL)
4041 total_pkt_bd->total_pkt_bytes = pkt_size;
4042
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004043 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004044 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00004045 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004046 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4047 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4048 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4049 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004050 if (pbd_e2)
4051 DP(NETIF_MSG_TX_QUEUED,
4052 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
Dmitry Kravkov91226792013-03-11 05:17:52 +00004053 pbd_e2,
4054 pbd_e2->data.mac_addr.dst_hi,
4055 pbd_e2->data.mac_addr.dst_mid,
4056 pbd_e2->data.mac_addr.dst_lo,
4057 pbd_e2->data.mac_addr.src_hi,
4058 pbd_e2->data.mac_addr.src_mid,
4059 pbd_e2->data.mac_addr.src_lo,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004060 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004061 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4062
Tom Herbert2df1a702011-11-28 16:33:37 +00004063 netdev_tx_sent_queue(txq, skb->len);
4064
Willem de Bruijn8373c572012-04-27 09:04:06 +00004065 skb_tx_timestamp(skb);
4066
Ariel Elior6383c0b2011-07-14 08:31:57 +00004067 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004068 /*
4069 * Make sure that the BD data is updated before updating the producer
4070 * since FW might read the BD right after the producer is updated.
4071 * This is only applicable for weak-ordered memory model archs such
4072 * as IA-64. The following barrier is also mandatory since FW will
4073 * assumes packets must have BDs.
4074 */
4075 wmb();
4076
Ariel Elior6383c0b2011-07-14 08:31:57 +00004077 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004078 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004079
Ariel Elior6383c0b2011-07-14 08:31:57 +00004080 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004081
4082 mmiowb();
4083
Ariel Elior6383c0b2011-07-14 08:31:57 +00004084 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004085
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004086 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004087 netif_tx_stop_queue(txq);
4088
 4089		/* The paired memory barrier is in bnx2x_tx_int(); we have to keep
 4090		 * the ordering between the set_bit() in netif_tx_stop_queue() and
 4091		 * the read of txdata->tx_bd_cons */
4092 smp_mb();
4093
Barak Witkowski15192a82012-06-19 07:48:28 +00004094 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004095 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004096 netif_tx_wake_queue(txq);
4097 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00004098 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004099
4100 return NETDEV_TX_OK;
4101}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004102
Ariel Elior6383c0b2011-07-14 08:31:57 +00004103/**
4104 * bnx2x_setup_tc - routine to configure net_device for multi tc
4105 *
4106 * @netdev: net device to configure
4107 * @tc: number of traffic classes to enable
4108 *
4109 * callback connected to the ndo_setup_tc function pointer
4110 */
4111int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4112{
4113 int cos, prio, count, offset;
4114 struct bnx2x *bp = netdev_priv(dev);
4115
4116 /* setup tc must be called under rtnl lock */
4117 ASSERT_RTNL();
4118
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004119	/* no traffic classes requested - reset the tc configuration and return */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004120 if (!num_tc) {
4121 netdev_reset_tc(dev);
4122 return 0;
4123 }
4124
4125 /* requested to support too many traffic classes */
4126 if (num_tc > bp->max_cos) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00004127 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004128 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004129 return -EINVAL;
4130 }
4131
4132 /* declare amount of supported traffic classes */
4133 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004134 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004135 return -EINVAL;
4136 }
4137
4138 /* configure priority to traffic class mapping */
4139 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4140 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00004141 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4142 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004143 prio, bp->prio_to_cos[prio]);
4144 }
4145
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004146	/* Use this configuration to differentiate tc0 from other COSes.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004147	   This can be used for ETS or PFC, and saves the effort of setting
 4148	   up a multi-class queueing discipline or negotiating DCBX with a switch:
4149 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00004150 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004151 for (prio = 1; prio < 16; prio++) {
4152 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00004153 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004154 } */
4155
4156 /* configure traffic class to transmission queue mapping */
4157 for (cos = 0; cos < bp->max_cos; cos++) {
4158 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00004159 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004160 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00004161 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4162 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004163 cos, offset, count);
4164 }
4165
4166 return 0;
4167}
4168
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004169/* called with rtnl_lock */
4170int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4171{
4172 struct sockaddr *addr = p;
4173 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004174 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004175
Merav Sicron51c1a582012-03-18 10:33:38 +00004176 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4177 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004178 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004179 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004180
Barak Witkowskia3348722012-04-23 03:04:46 +00004181 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4182 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004183 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004184 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004185 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004186
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004187 if (netif_running(dev)) {
4188 rc = bnx2x_set_eth_mac(bp, false);
4189 if (rc)
4190 return rc;
4191 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004192
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004193 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4194
4195 if (netif_running(dev))
4196 rc = bnx2x_set_eth_mac(bp, true);
4197
4198 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004199}
4200
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004201static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4202{
4203 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4204 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004205 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004206
4207 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00004208
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004209 if (IS_FCOE_IDX(fp_index)) {
4210 memset(sb, 0, sizeof(union host_hc_status_block));
4211 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004212 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004213 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004214 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004215 BNX2X_PCI_FREE(sb->e2_sb,
4216 bnx2x_fp(bp, fp_index,
4217 status_blk_mapping),
4218 sizeof(struct host_hc_status_block_e2));
4219 else
4220 BNX2X_PCI_FREE(sb->e1x_sb,
4221 bnx2x_fp(bp, fp_index,
4222 status_blk_mapping),
4223 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004224 }
Merav Sicron55c11942012-11-07 00:45:48 +00004225
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004226 /* Rx */
4227 if (!skip_rx_queue(bp, fp_index)) {
4228 bnx2x_free_rx_bds(fp);
4229
4230 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4231 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4232 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4233 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4234 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4235
4236 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4237 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4238 sizeof(struct eth_fast_path_rx_cqe) *
4239 NUM_RCQ_BD);
4240
4241 /* SGE ring */
4242 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4243 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4244 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4245 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4246 }
4247
4248 /* Tx */
4249 if (!skip_tx_queue(bp, fp_index)) {
4250 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004251 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004252 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004253
Merav Sicron51c1a582012-03-18 10:33:38 +00004254 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00004255 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004256 fp_index, cos, txdata->cid);
4257
4258 BNX2X_FREE(txdata->tx_buf_ring);
4259 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4260 txdata->tx_desc_mapping,
4261 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4262 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004263 }
4264 /* end of fastpath */
4265}
4266
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004267static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004268{
4269 int i;
4270 for_each_cnic_queue(bp, i)
4271 bnx2x_free_fp_mem_at(bp, i);
4272}
4273
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004274void bnx2x_free_fp_mem(struct bnx2x *bp)
4275{
4276 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00004277 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004278 bnx2x_free_fp_mem_at(bp, i);
4279}
4280
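/* Cache shortcut pointers to the index_values and running_index arrays of
 * the queue's status block; their layout differs between E1x and E2/E3.
 */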
Eric Dumazet1191cb82012-04-27 21:39:21 +00004281static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004282{
4283 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004284 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004285 bnx2x_fp(bp, index, sb_index_values) =
4286 (__le16 *)status_blk.e2_sb->sb.index_values;
4287 bnx2x_fp(bp, index, sb_running_index) =
4288 (__le16 *)status_blk.e2_sb->sb.running_index;
4289 } else {
4290 bnx2x_fp(bp, index, sb_index_values) =
4291 (__le16 *)status_blk.e1x_sb->sb.index_values;
4292 bnx2x_fp(bp, index, sb_running_index) =
4293 (__le16 *)status_blk.e1x_sb->sb.running_index;
4294 }
4295}
4296
Eric Dumazet1191cb82012-04-27 21:39:21 +00004297/* Returns the number of actually allocated BDs */
4298static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4299 int rx_ring_size)
4300{
4301 struct bnx2x *bp = fp->bp;
4302 u16 ring_prod, cqe_ring_prod;
4303 int i, failure_cnt = 0;
4304
4305 fp->rx_comp_cons = 0;
4306 cqe_ring_prod = ring_prod = 0;
4307
 4308	/* This routine is called only during init, so
4309 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4310 */
4311 for (i = 0; i < rx_ring_size; i++) {
Michal Schmidt996dedb2013-09-05 22:13:09 +02004312 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
Eric Dumazet1191cb82012-04-27 21:39:21 +00004313 failure_cnt++;
4314 continue;
4315 }
4316 ring_prod = NEXT_RX_IDX(ring_prod);
4317 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4318 WARN_ON(ring_prod <= (i - failure_cnt));
4319 }
4320
4321 if (failure_cnt)
4322 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4323 i - failure_cnt, fp->index);
4324
4325 fp->rx_bd_prod = ring_prod;
4326 /* Limit the CQE producer by the CQE ring size */
4327 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4328 cqe_ring_prod);
4329 fp->rx_pkt = fp->rx_calls = 0;
4330
Barak Witkowski15192a82012-06-19 07:48:28 +00004331 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004332
4333 return i - failure_cnt;
4334}
4335
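/* Link the RCQ pages into a ring: the last CQE of each page is turned into
 * a "next page" pointer to the following page (wrapping after the last one).
 */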
4336static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4337{
4338 int i;
4339
4340 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4341 struct eth_rx_cqe_next_page *nextpg;
4342
4343 nextpg = (struct eth_rx_cqe_next_page *)
4344 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4345 nextpg->addr_hi =
4346 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4347 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4348 nextpg->addr_lo =
4349 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351 }
4352}
4353
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004354static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4355{
4356 union host_hc_status_block *sb;
4357 struct bnx2x_fastpath *fp = &bp->fp[index];
4358 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004359 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004360 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004361
Barak Witkowskia3348722012-04-23 03:04:46 +00004362 if (!bp->rx_ring_size &&
4363 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004364 rx_ring_size = MIN_RX_SIZE_NONTPA;
4365 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004366 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004367 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4368
Yuval Mintz065f8b92012-10-03 04:22:59 +00004369 if (CHIP_IS_E3(bp)) {
4370 u32 cfg = SHMEM_RD(bp,
4371 dev_info.port_hw_config[BP_PORT(bp)].
4372 default_cfg);
4373
4374 /* Decrease ring size for 1G functions */
4375 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4376 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4377 rx_ring_size /= 10;
4378 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004379
David S. Miller8decf862011-09-22 03:23:13 -04004380 /* allocate at least number of buffers required by FW */
4381 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4382 MIN_RX_SIZE_TPA, rx_ring_size);
4383
4384 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004385 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004386 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004387
Yuval Mintz04c46732013-01-23 03:21:46 +00004388 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4389
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004390 /* Common */
4391 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004392
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004393 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004394 /* status blocks */
Joe Perchescd2b0382014-02-20 13:25:51 -08004395 if (!CHIP_IS_E1x(bp)) {
4396 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4397 sizeof(struct host_hc_status_block_e2));
4398 if (!sb->e2_sb)
4399 goto alloc_mem_err;
4400 } else {
4401 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4402 sizeof(struct host_hc_status_block_e1x));
4403 if (!sb->e1x_sb)
4404 goto alloc_mem_err;
4405 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004406 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004407
4408 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4409 * set shortcuts for it.
4410 */
4411 if (!IS_FCOE_IDX(index))
4412 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004413
4414 /* Tx */
4415 if (!skip_tx_queue(bp, index)) {
4416 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004417 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004418 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004419
Merav Sicron51c1a582012-03-18 10:33:38 +00004420 DP(NETIF_MSG_IFUP,
4421 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004422 index, cos);
4423
Joe Perchescd2b0382014-02-20 13:25:51 -08004424 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4425 sizeof(struct sw_tx_bd),
4426 GFP_KERNEL);
4427 if (!txdata->tx_buf_ring)
4428 goto alloc_mem_err;
4429 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4430 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4431 if (!txdata->tx_desc_ring)
4432 goto alloc_mem_err;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004433 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004434 }
4435
4436 /* Rx */
4437 if (!skip_rx_queue(bp, index)) {
4438 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Joe Perchescd2b0382014-02-20 13:25:51 -08004439 bnx2x_fp(bp, index, rx_buf_ring) =
4440 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4441 if (!bnx2x_fp(bp, index, rx_buf_ring))
4442 goto alloc_mem_err;
4443 bnx2x_fp(bp, index, rx_desc_ring) =
4444 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4445 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4446 if (!bnx2x_fp(bp, index, rx_desc_ring))
4447 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004448
Dmitry Kravkov75b29452013-06-19 01:36:05 +03004449 /* Seed all CQEs by 1s */
Joe Perchescd2b0382014-02-20 13:25:51 -08004450 bnx2x_fp(bp, index, rx_comp_ring) =
4451 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4452 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4453 if (!bnx2x_fp(bp, index, rx_comp_ring))
4454 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004455
4456 /* SGE ring */
Joe Perchescd2b0382014-02-20 13:25:51 -08004457 bnx2x_fp(bp, index, rx_page_ring) =
4458 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4459 GFP_KERNEL);
4460 if (!bnx2x_fp(bp, index, rx_page_ring))
4461 goto alloc_mem_err;
4462 bnx2x_fp(bp, index, rx_sge_ring) =
4463 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4464 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4465 if (!bnx2x_fp(bp, index, rx_sge_ring))
4466 goto alloc_mem_err;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004467 /* RX BD ring */
4468 bnx2x_set_next_page_rx_bd(fp);
4469
4470 /* CQ ring */
4471 bnx2x_set_next_page_rx_cq(fp);
4472
4473 /* BDs */
4474 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4475 if (ring_size < rx_ring_size)
4476 goto alloc_mem_err;
4477 }
4478
4479 return 0;
4480
4481/* handles low memory cases */
4482alloc_mem_err:
4483 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4484 index, ring_size);
 4485	/* FW will drop all packets if the queue is not big enough;
 4486	 * in these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004487 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004488 */
4489 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004490 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004491 /* release memory allocated for this queue */
4492 bnx2x_free_fp_mem_at(bp, index);
4493 return -ENOMEM;
4494 }
4495 return 0;
4496}
4497
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004498static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004499{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004500 if (!NO_FCOE(bp))
4501 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004502 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004503			/* we will fail the load process instead of marking
4504 * NO_FCOE_FLAG
4505 */
4506 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004507
4508 return 0;
4509}
4510
stephen hemmingera8f47eb2014-01-09 22:20:11 -08004511static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
Merav Sicron55c11942012-11-07 00:45:48 +00004512{
4513 int i;
4514
4515 /* 1. Allocate FP for leading - fatal if error
4516 * 2. Allocate RSS - fix number of queues if error
4517 */
4518
4519 /* leading */
4520 if (bnx2x_alloc_fp_mem_at(bp, 0))
4521 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004522
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004523 /* RSS */
4524 for_each_nondefault_eth_queue(bp, i)
4525 if (bnx2x_alloc_fp_mem_at(bp, i))
4526 break;
4527
4528 /* handle memory failures */
4529 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4530 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4531
4532 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004533 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004534 if (CNIC_SUPPORT(bp))
4535 /* move non eth FPs next to last eth FP
4536 * must be done in that order
4537 * FCOE_IDX < FWD_IDX < OOO_IDX
4538 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004539
Merav Sicron55c11942012-11-07 00:45:48 +00004540		/* move FCoE fp even if NO_FCOE_FLAG is on */
4541 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4542 bp->num_ethernet_queues -= delta;
4543 bp->num_queues = bp->num_ethernet_queues +
4544 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004545 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4546 bp->num_queues + delta, bp->num_queues);
4547 }
4548
4549 return 0;
4550}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004551
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004552void bnx2x_free_mem_bp(struct bnx2x *bp)
4553{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004554 int i;
4555
4556 for (i = 0; i < bp->fp_array_size; i++)
4557 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004558 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004559 kfree(bp->sp_objs);
4560 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004561 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004562 kfree(bp->msix_table);
4563 kfree(bp->ilt);
4564}
4565
Bill Pemberton0329aba2012-12-03 09:24:24 -05004566int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004567{
4568 struct bnx2x_fastpath *fp;
4569 struct msix_entry *tbl;
4570 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004571 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004572 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004573 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004574
Ariel Elior6383c0b2011-07-14 08:31:57 +00004575 /*
 4576	 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004577 * path IGU SBs plus default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004578 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004579 msix_table_size = bp->igu_sb_cnt;
4580 if (IS_PF(bp))
4581 msix_table_size++;
4582 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004583
4584 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004585 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004586 bp->fp_array_size = fp_array_size;
4587 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004588
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004589 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004590 if (!fp)
4591 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004592 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004593 fp[i].tpa_info =
4594 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4595 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4596 if (!(fp[i].tpa_info))
4597 goto alloc_err;
4598 }
4599
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004600 bp->fp = fp;
4601
Barak Witkowski15192a82012-06-19 07:48:28 +00004602 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004603 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004604 GFP_KERNEL);
4605 if (!bp->sp_objs)
4606 goto alloc_err;
4607
4608 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004609 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004610 GFP_KERNEL);
4611 if (!bp->fp_stats)
4612 goto alloc_err;
4613
Merav Sicron65565882012-06-19 07:48:26 +00004614 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004615 txq_array_size =
4616 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4617 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4618
4619 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4620 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004621 if (!bp->bnx2x_txq)
4622 goto alloc_err;
4623
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004624 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004625 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004626 if (!tbl)
4627 goto alloc_err;
4628 bp->msix_table = tbl;
4629
4630 /* ilt */
4631 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4632 if (!ilt)
4633 goto alloc_err;
4634 bp->ilt = ilt;
4635
4636 return 0;
4637alloc_err:
4638 bnx2x_free_mem_bp(bp);
4639 return -ENOMEM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004640}
4641
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004642int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004643{
4644 struct bnx2x *bp = netdev_priv(dev);
4645
4646 if (unlikely(!netif_running(dev)))
4647 return 0;
4648
Yuval Mintz5d07d862012-09-13 02:56:21 +00004649 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004650 return bnx2x_nic_load(bp, LOAD_NORMAL);
4651}
4652
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004653int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4654{
4655 u32 sel_phy_idx = 0;
4656 if (bp->link_params.num_phys <= 1)
4657 return INT_PHY;
4658
4659 if (bp->link_vars.link_up) {
4660 sel_phy_idx = EXT_PHY1;
4661 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4662 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4663 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4664 sel_phy_idx = EXT_PHY2;
4665 } else {
4666
4667 switch (bnx2x_phy_selection(&bp->link_params)) {
4668 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4669 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4670 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4671 sel_phy_idx = EXT_PHY1;
4672 break;
4673 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4674 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4675 sel_phy_idx = EXT_PHY2;
4676 break;
4677 }
4678 }
4679
4680 return sel_phy_idx;
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004681}
4682int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4683{
4684 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4685 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004686	 * The selected active PHY index is always the one after swapping (in
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004687	 * case PHY swapping is enabled), so when swapping is enabled we need
 4688	 * to reverse the configuration.
4689 */
4690
4691 if (bp->link_params.multi_phy_config &
4692 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4693 if (sel_phy_idx == EXT_PHY1)
4694 sel_phy_idx = EXT_PHY2;
4695 else if (sel_phy_idx == EXT_PHY2)
4696 sel_phy_idx = EXT_PHY1;
4697 }
4698 return LINK_CONFIG_IDX(sel_phy_idx);
4699}
4700
Merav Sicron55c11942012-11-07 00:45:48 +00004701#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004702int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4703{
4704 struct bnx2x *bp = netdev_priv(dev);
4705 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4706
4707 switch (type) {
4708 case NETDEV_FCOE_WWNN:
4709 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4710 cp->fcoe_wwn_node_name_lo);
4711 break;
4712 case NETDEV_FCOE_WWPN:
4713 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4714 cp->fcoe_wwn_port_name_lo);
4715 break;
4716 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004717 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004718 return -EINVAL;
4719 }
4720
4721 return 0;
4722}
4723#endif
4724
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004725/* called with rtnl_lock */
4726int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4727{
4728 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004729
4730 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004731 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004732 return -EAGAIN;
4733 }
4734
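	/* The new MTU must fit in a jumbo frame, and the resulting frame
	 * (MTU plus Ethernet header) must not fall below the minimum size.
	 */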
4735 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004736 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4737 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004738 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004739 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004740
4741 /* This does not race with packet allocation
4742 * because the actual alloc size is
4743 * only updated as part of load
4744 */
4745 dev->mtu = new_mtu;
4746
Michał Mirosław66371c42011-04-12 09:38:23 +00004747 return bnx2x_reload_if_running(dev);
4748}
4749
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004750netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004751 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004752{
4753 struct bnx2x *bp = netdev_priv(dev);
4754
4755 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004756 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004757 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004758 features &= ~NETIF_F_GRO;
4759 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004760
4761 return features;
4762}
4763
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004764int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004765{
4766 struct bnx2x *bp = netdev_priv(dev);
4767 u32 flags = bp->flags;
Eric Dumazet8802f572013-05-18 07:14:53 +00004768 u32 changes;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004769 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00004770
4771 if (features & NETIF_F_LRO)
4772 flags |= TPA_ENABLE_FLAG;
4773 else
4774 flags &= ~TPA_ENABLE_FLAG;
4775
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004776 if (features & NETIF_F_GRO)
4777 flags |= GRO_ENABLE_FLAG;
4778 else
4779 flags &= ~GRO_ENABLE_FLAG;
4780
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004781 if (features & NETIF_F_LOOPBACK) {
4782 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4783 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4784 bnx2x_reload = true;
4785 }
4786 } else {
4787 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4788 bp->link_params.loopback_mode = LOOPBACK_NONE;
4789 bnx2x_reload = true;
4790 }
4791 }
4792
Eric Dumazet8802f572013-05-18 07:14:53 +00004793 changes = flags ^ bp->flags;
4794
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004795 /* if GRO is changed while LRO is enabled, don't force a reload */
Eric Dumazet8802f572013-05-18 07:14:53 +00004796 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4797 changes &= ~GRO_ENABLE_FLAG;
4798
4799 if (changes)
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004800 bnx2x_reload = true;
Eric Dumazet8802f572013-05-18 07:14:53 +00004801
4802 bp->flags = flags;
Michał Mirosław66371c42011-04-12 09:38:23 +00004803
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004804 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004805 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4806 return bnx2x_reload_if_running(dev);
4807 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004808 }
4809
Michał Mirosław66371c42011-04-12 09:38:23 +00004810 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004811}
4812
4813void bnx2x_tx_timeout(struct net_device *dev)
4814{
4815 struct bnx2x *bp = netdev_priv(dev);
4816
4817#ifdef BNX2X_STOP_ON_ERROR
4818 if (!bp->panic)
4819 bnx2x_panic();
4820#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004821
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004822 /* This allows the netif to be shutdown gracefully before resetting */
Yuval Mintz230bb0f2014-02-12 18:19:56 +02004823 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004824}
4825
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004826int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4827{
4828 struct net_device *dev = pci_get_drvdata(pdev);
4829 struct bnx2x *bp;
4830
4831 if (!dev) {
4832 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4833 return -ENODEV;
4834 }
4835 bp = netdev_priv(dev);
4836
4837 rtnl_lock();
4838
4839 pci_save_state(pdev);
4840
4841 if (!netif_running(dev)) {
4842 rtnl_unlock();
4843 return 0;
4844 }
4845
4846 netif_device_detach(dev);
4847
Yuval Mintz5d07d862012-09-13 02:56:21 +00004848 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004849
4850 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4851
4852 rtnl_unlock();
4853
4854 return 0;
4855}
4856
4857int bnx2x_resume(struct pci_dev *pdev)
4858{
4859 struct net_device *dev = pci_get_drvdata(pdev);
4860 struct bnx2x *bp;
4861 int rc;
4862
4863 if (!dev) {
4864 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4865 return -ENODEV;
4866 }
4867 bp = netdev_priv(dev);
4868
4869 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004870 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004871 return -EAGAIN;
4872 }
4873
4874 rtnl_lock();
4875
4876 pci_restore_state(pdev);
4877
4878 if (!netif_running(dev)) {
4879 rtnl_unlock();
4880 return 0;
4881 }
4882
4883 bnx2x_set_power_state(bp, PCI_D0);
4884 netif_device_attach(dev);
4885
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004886 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4887
4888 rtnl_unlock();
4889
4890 return rc;
4891}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004892
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004893void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4894 u32 cid)
4895{
Ariel Eliorb9871bc2013-09-04 14:09:21 +03004896 if (!cxt) {
4897 BNX2X_ERR("bad context pointer %p\n", cxt);
4898 return;
4899 }
4900
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004901 /* ustorm cxt validation */
4902 cxt->ustorm_ag_context.cdu_usage =
4903 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4904 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4905 /* xcontext validation */
4906 cxt->xstorm_ag_context.cdu_reserved =
4907 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4908 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4909}
4910
Eric Dumazet1191cb82012-04-27 21:39:21 +00004911static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4912 u8 fw_sb_id, u8 sb_index,
4913 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004914{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004915 u32 addr = BAR_CSTRORM_INTMEM +
4916 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4917 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004918 DP(NETIF_MSG_IFUP,
4919 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4920 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004921}
4922
Eric Dumazet1191cb82012-04-27 21:39:21 +00004923static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4924 u16 fw_sb_id, u8 sb_index,
4925 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004926{
4927 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4928 u32 addr = BAR_CSTRORM_INTMEM +
4929 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004930 u8 flags = REG_RD8(bp, addr);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004931 /* clear and set */
4932 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4933 flags |= enable_flag;
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004934 REG_WR8(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004935 DP(NETIF_MSG_IFUP,
4936 "port %x fw_sb_id %d sb_index %d disable %d\n",
4937 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004938}
4939
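/* Program the HC timeout and enable/disable state of a single status block
 * index; the timeout is expressed in BNX2X_BTR ticks, and a zero usec value
 * implicitly disables coalescing on that index.
 */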
4940void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4941 u8 sb_index, u8 disable, u16 usec)
4942{
4943 int port = BP_PORT(bp);
4944 u8 ticks = usec / BNX2X_BTR;
4945
4946 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4947
4948 disable = disable ? 1 : (usec ? 0 : 1);
4949 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4950}
Yuval Mintz230bb0f2014-02-12 18:19:56 +02004951
4952void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4953 u32 verbose)
4954{
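	/* The barriers around set_bit() order the flag update with respect to
	 * the surrounding memory accesses before the sp_rtnl task runs.
	 */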
Peter Zijlstra4e857c52014-03-17 18:06:10 +01004955 smp_mb__before_atomic();
Yuval Mintz230bb0f2014-02-12 18:19:56 +02004956 set_bit(flag, &bp->sp_rtnl_state);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01004957 smp_mb__after_atomic();
Yuval Mintz230bb0f2014-02-12 18:19:56 +02004958 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4959 flag);
4960 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4961}
4962EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);