Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Yuval Mintz247fa822013-01-14 05:11:50 +00003 * Copyright (c) 2007-2013 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Joe Perchesf1deab52011-08-14 12:16:21 +000018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000021#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000022#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000023#include <linux/ip.h>
Yuval Mintz99690852013-01-14 05:11:49 +000024#include <net/tcp.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000025#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070026#include <net/ip6_checksum.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030027#include <net/busy_poll.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000028#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000029#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000030#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000031#include "bnx2x_sp.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000032
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000033/**
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000034 * bnx2x_move_fp - move content of the fastpath structure.
35 *
36 * @bp: driver handle
37 * @from: source FP index
38 * @to: destination FP index
39 *
 40 * Makes sure the content of the bp->fp[to].napi is kept
Ariel Elior72754082011-11-13 04:34:31 +000041 * intact. This is done by first copying the napi struct from
 42 * the target to the source, and then memcpy'ing the entire
Merav Sicron65565882012-06-19 07:48:26 +000043 * source onto the target. Update txdata pointers and related
44 * content.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000045 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
Barak Witkowski15192a82012-06-19 07:48:28 +000050 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
Merav Sicron65565882012-06-19 07:48:26 +000054 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
Yuval Mintz34d56262013-08-28 01:13:01 +030056 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
Ariel Elior72754082011-11-13 04:34:31 +000057
58 /* Copy the NAPI object as it has been already initialized */
59 from_fp->napi = to_fp->napi;
60
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000061 /* Move bnx2x_fastpath contents */
62 memcpy(to_fp, from_fp, sizeof(*to_fp));
63 to_fp->index = to;
Merav Sicron65565882012-06-19 07:48:26 +000064
Yuval Mintz34d56262013-08-28 01:13:01 +030065 /* Retain the tpa_info of the original `to' version as we don't want
66 * 2 FPs to contain the same tpa_info pointer.
67 */
68 to_fp->tpa_info = old_tpa_info;
69
Barak Witkowski15192a82012-06-19 07:48:28 +000070 /* move sp_objs contents as well, as their indices match fp ones */
71 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
72
73 /* move fp_stats contents as well, as their indices match fp ones */
74 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
75
Merav Sicron65565882012-06-19 07:48:26 +000076 /* Update txdata pointers in fp and move txdata content accordingly:
77 * Each fp consumes 'max_cos' txdata structures, so the index should be
78 * decremented by max_cos x delta.
79 */
80
81 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
82 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
83 (bp)->max_cos;
84 if (from == FCOE_IDX(bp)) {
85 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
86 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
87 }
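	/* Illustrative FCoE case (hypothetical numbers): with 8 ETH queues,
	 * max_cos = 3 and a move of delta = from - to = 2 slots, the ETH
	 * txdata entries span 24 slots before the move and 18 after it, so
	 * the FCoE txdata index drops from 24 + FCOE_TXQ_IDX_OFFSET to
	 * 18 + FCOE_TXQ_IDX_OFFSET.
	 */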
88
Yuval Mintz4864a162013-01-10 04:53:39 +000089 memcpy(&bp->bnx2x_txq[new_txdata_index],
90 &bp->bnx2x_txq[old_txdata_index],
Merav Sicron65565882012-06-19 07:48:26 +000091 sizeof(struct bnx2x_fp_txdata));
92 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000093}
94
Ariel Elior8ca5e172013-01-01 05:22:34 +000095/**
96 * bnx2x_fill_fw_str - Fill buffer with FW version string.
97 *
98 * @bp: driver handle
99 * @buf: character buffer to fill with the fw name
100 * @buf_len: length of the above buffer
101 *
102 */
103void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
104{
105 if (IS_PF(bp)) {
106 u8 phy_fw_ver[PHY_FW_VER_LEN];
107
108 phy_fw_ver[0] = '\0';
109 bnx2x_get_ext_phy_fw_version(&bp->link_params,
110 phy_fw_ver, PHY_FW_VER_LEN);
111 strlcpy(buf, bp->fw_ver, buf_len);
112 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
113 "bc %d.%d.%d%s%s",
114 (bp->common.bc_ver & 0xff0000) >> 16,
115 (bp->common.bc_ver & 0xff00) >> 8,
116 (bp->common.bc_ver & 0xff),
117 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
118 } else {
Ariel Elior64112802013-01-07 00:50:23 +0000119 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
Ariel Elior8ca5e172013-01-01 05:22:34 +0000120 }
121}
122
David S. Miller4b87f922013-01-15 15:05:59 -0500123/**
Yuval Mintz4864a162013-01-10 04:53:39 +0000124 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
125 *
126 * @bp: driver handle
127 * @delta: number of eth queues which were not allocated
128 */
129static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
130{
131 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
132
133 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
Yuval Mintz16a5fd92013-06-02 00:06:18 +0000134	 * backward along the array could cause memory to be overwritten
Yuval Mintz4864a162013-01-10 04:53:39 +0000135 */
136 for (cos = 1; cos < bp->max_cos; cos++) {
137 for (i = 0; i < old_eth_num - delta; i++) {
138 struct bnx2x_fastpath *fp = &bp->fp[i];
139 int new_idx = cos * (old_eth_num - delta) + i;
140
141 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
142 sizeof(struct bnx2x_fp_txdata));
143 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
144 }
145 }
146}
147
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300148int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
149
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000150/* free skb in the packet ring at pos idx
151 * return idx of last bd freed
152 */
Ariel Elior6383c0b2011-07-14 08:31:57 +0000153static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +0000154 u16 idx, unsigned int *pkts_compl,
155 unsigned int *bytes_compl)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000156{
Ariel Elior6383c0b2011-07-14 08:31:57 +0000157 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000158 struct eth_tx_start_bd *tx_start_bd;
159 struct eth_tx_bd *tx_data_bd;
160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
162 int nbd;
163
164 /* prefetch skb end pointer to speedup dev_kfree_skb() */
165 prefetch(&skb->end);
166
Merav Sicron51c1a582012-03-18 10:33:38 +0000167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000168 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000169
170 /* unmap first bd */
Ariel Elior6383c0b2011-07-14 08:31:57 +0000171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +0000173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000174
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176#ifdef BNX2X_STOP_ON_ERROR
177 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
178 BNX2X_ERR("BAD nbd!\n");
179 bnx2x_panic();
180 }
181#endif
182 new_cons = nbd + tx_buf->first_bd;
183
184 /* Get the next bd */
185 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
186
187 /* Skip a parse bd... */
188 --nbd;
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
190
191 /* ...and the TSO split header bd since they have no mapping */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
193 --nbd;
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
195 }
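	/* A sketch of the BD chain being walked here (typical TSO packet,
	 * not an exhaustive layout): start BD, parse BD, optional TSO
	 * split-header BD, then the frag BDs.  Only the start BD and the
	 * frag BDs carry DMA mappings, which is why nbd is decremented
	 * above without unmapping anything.
	 */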
196
197 /* now free frags */
198 while (nbd > 0) {
199
Ariel Elior6383c0b2011-07-14 08:31:57 +0000200 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000201 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
202 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
203 if (--nbd)
204 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
205 }
206
207 /* release skb */
208 WARN_ON(!skb);
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000209 if (likely(skb)) {
Tom Herbert2df1a702011-11-28 16:33:37 +0000210 (*pkts_compl)++;
211 (*bytes_compl) += skb->len;
212 }
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000213
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000214 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000215 tx_buf->first_bd = 0;
216 tx_buf->skb = NULL;
217
218 return new_cons;
219}
220
Ariel Elior6383c0b2011-07-14 08:31:57 +0000221int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000222{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000223 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000224 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Tom Herbert2df1a702011-11-28 16:33:37 +0000225 unsigned int pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000226
227#ifdef BNX2X_STOP_ON_ERROR
228 if (unlikely(bp->panic))
229 return -1;
230#endif
231
Ariel Elior6383c0b2011-07-14 08:31:57 +0000232 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
233 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
234 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000235
236 while (sw_cons != hw_cons) {
237 u16 pkt_cons;
238
239 pkt_cons = TX_BD(sw_cons);
240
Merav Sicron51c1a582012-03-18 10:33:38 +0000241 DP(NETIF_MSG_TX_DONE,
242 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000243 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000244
Tom Herbert2df1a702011-11-28 16:33:37 +0000245 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
Yuval Mintz2de67432013-01-23 03:21:43 +0000246 &pkts_compl, &bytes_compl);
Tom Herbert2df1a702011-11-28 16:33:37 +0000247
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000248 sw_cons++;
249 }
250
Tom Herbert2df1a702011-11-28 16:33:37 +0000251 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
252
Ariel Elior6383c0b2011-07-14 08:31:57 +0000253 txdata->tx_pkt_cons = sw_cons;
254 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000255
256 /* Need to make the tx_bd_cons update visible to start_xmit()
257 * before checking for netif_tx_queue_stopped(). Without the
258 * memory barrier, there is a small possibility that
259 * start_xmit() will miss it and cause the queue to be stopped
260 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300261 * On the other hand we need an rmb() here to ensure the proper
262 * ordering of bit testing in the following
263 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000264 */
265 smp_mb();
266
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000267 if (unlikely(netif_tx_queue_stopped(txq))) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +0000268 /* Taking tx_lock() is needed to prevent re-enabling the queue
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000269	 * while it's empty. This could have happened if rx_action() gets
270 * suspended in bnx2x_tx_int() after the condition before
271 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
272 *
273 * stops the queue->sees fresh tx_bd_cons->releases the queue->
274 * sends some packets consuming the whole queue again->
275 * stops the queue
276 */
277
278 __netif_tx_lock(txq, smp_processor_id());
279
280 if ((netif_tx_queue_stopped(txq)) &&
281 (bp->state == BNX2X_STATE_OPEN) &&
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +0000282 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000283 netif_tx_wake_queue(txq);
284
285 __netif_tx_unlock(txq);
286 }
287 return 0;
288}
289
290static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
291 u16 idx)
292{
293 u16 last_max = fp->last_max_sge;
294
295 if (SUB_S16(idx, last_max) > 0)
296 fp->last_max_sge = idx;
297}
298
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000299static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
300 u16 sge_len,
301 struct eth_end_agg_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000302{
303 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000304 u16 last_max, last_elem, first_elem;
305 u16 delta = 0;
306 u16 i;
307
308 if (!sge_len)
309 return;
310
311 /* First mark all used pages */
312 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300313 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000314 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000315
316 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000317 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000318
319 /* Here we assume that the last SGE index is the biggest */
320 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000321 bnx2x_update_last_max_sge(fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000322 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000323
324 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300325 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
326 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000327
328 /* If ring is not full */
329 if (last_elem + 1 != first_elem)
330 last_elem++;
331
332 /* Now update the prod */
333 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
334 if (likely(fp->sge_mask[i]))
335 break;
336
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300337 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
338 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000339 }
340
341 if (delta > 0) {
342 fp->rx_sge_prod += delta;
343 /* clear page-end entries */
344 bnx2x_clear_sge_mask_next_elems(fp);
345 }
346
347 DP(NETIF_MSG_RX_STATUS,
348 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
349 fp->last_max_sge, fp->rx_sge_prod);
350}
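/* A sketch of the SGE mask bookkeeping above, assuming 64-bit mask
 * elements (BIT_VEC64_ELEM_SZ == 64): every consumed SGE clears one bit;
 * once all bits of a mask element are cleared the element is re-armed to
 * all ones and rx_sge_prod advances by 64 entries, so the producer only
 * moves forward in whole mask-element steps.
 */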
351
Yuval Mintz2de67432013-01-23 03:21:43 +0000352/* Get Toeplitz hash value in the skb using the value from the
Eric Dumazete52fcb22011-11-14 06:05:34 +0000353 * CQE (calculated by HW).
354 */
355static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000356 const struct eth_fast_path_rx_cqe *cqe,
357 bool *l4_rxhash)
Eric Dumazete52fcb22011-11-14 06:05:34 +0000358{
Yuval Mintz2de67432013-01-23 03:21:43 +0000359 /* Get Toeplitz hash from CQE */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000360 if ((bp->dev->features & NETIF_F_RXHASH) &&
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000361 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
362 enum eth_rss_hash_type htype;
363
364 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
365 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
366 (htype == TCP_IPV6_HASH_TYPE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000367 return le32_to_cpu(cqe->rss_hash_result);
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000368 }
369 *l4_rxhash = false;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000370 return 0;
371}
372
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000373static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000374 u16 cons, u16 prod,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300375 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000376{
377 struct bnx2x *bp = fp->bp;
378 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
379 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
380 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
381 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300382 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
383 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000384
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300385 /* print error if current state != stop */
386 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000387 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
388
Eric Dumazete52fcb22011-11-14 06:05:34 +0000389 /* Try to map an empty data buffer from the aggregation info */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300390 mapping = dma_map_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000391 first_buf->data + NET_SKB_PAD,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300392 fp->rx_buf_size, DMA_FROM_DEVICE);
393 /*
394 * ...if it fails - move the skb from the consumer to the producer
395 * and set the current aggregation state as ERROR to drop it
396 * when TPA_STOP arrives.
397 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000398
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300399 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
400 /* Move the BD from the consumer to the producer */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000401 bnx2x_reuse_rx_data(fp, cons, prod);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300402 tpa_info->tpa_state = BNX2X_TPA_ERROR;
403 return;
404 }
405
Eric Dumazete52fcb22011-11-14 06:05:34 +0000406 /* move empty data from pool to prod */
407 prod_rx_buf->data = first_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300408 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000409 /* point prod_bd to new data */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000410 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
411 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
412
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300413 /* move partial skb from cons to pool (don't unmap yet) */
414 *first_buf = *cons_rx_buf;
415
416 /* mark bin state as START */
417 tpa_info->parsing_flags =
418 le16_to_cpu(cqe->pars_flags.flags);
419 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
420 tpa_info->tpa_state = BNX2X_TPA_START;
421 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
422 tpa_info->placement_offset = cqe->placement_offset;
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000423 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000424 if (fp->mode == TPA_MODE_GRO) {
425 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
Yuval Mintz924d75a2013-01-23 03:21:44 +0000426 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000427 tpa_info->gro_size = gro_size;
428 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300429
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000430#ifdef BNX2X_STOP_ON_ERROR
431 fp->tpa_queue_used |= (1 << queue);
432#ifdef _ASM_GENERIC_INT_L64_H
433 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
434#else
435 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
436#endif
437 fp->tpa_queue_used);
438#endif
439}
440
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000441/* Timestamp option length allowed for TPA aggregation:
442 *
443 * nop nop kind length echo val
444 */
445#define TPA_TSTAMP_OPT_LEN 12
446/**
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000447 * bnx2x_set_gro_params - compute GRO values
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000448 *
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000449 * @skb: packet skb
Dmitry Kravkove8920672011-05-04 23:52:40 +0000450 * @parsing_flags: parsing flags from the START CQE
451 * @len_on_bd: total length of the first packet for the
452 * aggregation.
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000453 * @pkt_len: length of all segments
Dmitry Kravkove8920672011-05-04 23:52:40 +0000454 *
455 * Approximate value of the MSS for this aggregation calculated using
456 * the first packet of it.
Yuval Mintz2de67432013-01-23 03:21:43 +0000457 * Compute number of aggregated segments, and gso_type.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000458 */
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000459static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
Yuval Mintzab5777d2013-03-11 05:17:47 +0000460 u16 len_on_bd, unsigned int pkt_len,
461 u16 num_of_coalesced_segs)
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000462{
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000463	/* A TPA aggregation won't have IP options, IPv6 extension headers,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300464	 * or TCP options other than the timestamp.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000465 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300466 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
467
468 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000469 PRS_FLAG_OVERETH_IPV6) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300470 hdrs_len += sizeof(struct ipv6hdr);
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000471 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
472 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300473 hdrs_len += sizeof(struct iphdr);
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000474 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
475 }
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000476
 477	/* Check if there was a TCP timestamp; if there is, it will
 478	 * always be 12 bytes long: nop nop kind length echo val.
479 *
480 * Otherwise FW would close the aggregation.
481 */
482 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
483 hdrs_len += TPA_TSTAMP_OPT_LEN;
484
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000485 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
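	/* Illustrative (hypothetical) numbers: an IPv4 aggregation carrying
	 * a TCP timestamp with len_on_bd = 1514 gives
	 * hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (timestamp) = 66,
	 * so gso_size (the approximated MSS) comes out as 1448.
	 */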
486
487 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
488 * to skb_shinfo(skb)->gso_segs
489 */
Yuval Mintzab5777d2013-03-11 05:17:47 +0000490 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000491}
492
Michal Schmidt996dedb2013-09-05 22:13:09 +0200493static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
494 u16 index, gfp_t gfp_mask)
Eric Dumazet1191cb82012-04-27 21:39:21 +0000495{
Michal Schmidt996dedb2013-09-05 22:13:09 +0200496 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000497 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
498 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
499 dma_addr_t mapping;
500
501 if (unlikely(page == NULL)) {
502 BNX2X_ERR("Can't alloc sge\n");
503 return -ENOMEM;
504 }
505
506 mapping = dma_map_page(&bp->pdev->dev, page, 0,
Yuval Mintz924d75a2013-01-23 03:21:44 +0000507 SGE_PAGES, DMA_FROM_DEVICE);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000508 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
509 __free_pages(page, PAGES_PER_SGE_SHIFT);
510 BNX2X_ERR("Can't map sge\n");
511 return -ENOMEM;
512 }
513
514 sw_buf->page = page;
515 dma_unmap_addr_set(sw_buf, mapping, mapping);
516
517 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
518 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
519
520 return 0;
521}
522
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000523static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000524 struct bnx2x_agg_info *tpa_info,
525 u16 pages,
526 struct sk_buff *skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300527 struct eth_end_agg_rx_cqe *cqe,
528 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000529{
530 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000531 u32 i, frag_len, frag_size;
532 int err, j, frag_id = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300533 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000534 u16 full_page = 0, gro_size = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000535
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300536 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000537
538 if (fp->mode == TPA_MODE_GRO) {
539 gro_size = tpa_info->gro_size;
540 full_page = tpa_info->full_page;
541 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000542
543 /* This is needed in order to enable forwarding support */
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000544 if (frag_size)
545 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
Yuval Mintzab5777d2013-03-11 05:17:47 +0000546 le16_to_cpu(cqe->pkt_len),
547 le16_to_cpu(cqe->num_of_coalesced_segs));
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000548
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000549#ifdef BNX2X_STOP_ON_ERROR
Yuval Mintz924d75a2013-01-23 03:21:44 +0000550 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000551 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
552 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300553 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000554 bnx2x_panic();
555 return -EINVAL;
556 }
557#endif
558
559 /* Run through the SGL and compose the fragmented skb */
560 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300561 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000562
563 /* FW gives the indices of the SGE as if the ring is an array
564 (meaning that "next" element will consume 2 indices) */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000565 if (fp->mode == TPA_MODE_GRO)
566 frag_len = min_t(u32, frag_size, (u32)full_page);
567 else /* LRO */
Yuval Mintz924d75a2013-01-23 03:21:44 +0000568 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000569
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000570 rx_pg = &fp->rx_page_ring[sge_idx];
571 old_rx_pg = *rx_pg;
572
573 /* If we fail to allocate a substitute page, we simply stop
574 where we are and drop the whole packet */
Michal Schmidt996dedb2013-09-05 22:13:09 +0200575 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000576 if (unlikely(err)) {
Barak Witkowski15192a82012-06-19 07:48:28 +0000577 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000578 return err;
579 }
580
Yuval Mintz16a5fd92013-06-02 00:06:18 +0000581 /* Unmap the page as we're going to pass it to the stack */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000582 dma_unmap_page(&bp->pdev->dev,
583 dma_unmap_addr(&old_rx_pg, mapping),
Yuval Mintz924d75a2013-01-23 03:21:44 +0000584 SGE_PAGES, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000585 /* Add one frag and update the appropriate fields in the skb */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000586 if (fp->mode == TPA_MODE_LRO)
587 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
588 else { /* GRO */
589 int rem;
590 int offset = 0;
591 for (rem = frag_len; rem > 0; rem -= gro_size) {
592 int len = rem > gro_size ? gro_size : rem;
593 skb_fill_page_desc(skb, frag_id++,
594 old_rx_pg.page, offset, len);
595 if (offset)
596 get_page(old_rx_pg.page);
597 offset += len;
598 }
599 }
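		/* A hypothetical split: with gro_size = 1448 and
		 * frag_len = 4096, the loop above adds three frags of 1448,
		 * 1448 and 1200 bytes from the same page, taking an extra
		 * page reference for every frag after the first.
		 */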
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000600
601 skb->data_len += frag_len;
Yuval Mintz924d75a2013-01-23 03:21:44 +0000602 skb->truesize += SGE_PAGES;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000603 skb->len += frag_len;
604
605 frag_size -= frag_len;
606 }
607
608 return 0;
609}
610
Eric Dumazetd46d1322012-12-10 12:16:06 +0000611static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
612{
613 if (fp->rx_frag_size)
614 put_page(virt_to_head_page(data));
615 else
616 kfree(data);
617}
618
Michal Schmidt996dedb2013-09-05 22:13:09 +0200619static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
Eric Dumazetd46d1322012-12-10 12:16:06 +0000620{
Michal Schmidt996dedb2013-09-05 22:13:09 +0200621 if (fp->rx_frag_size) {
622 /* GFP_KERNEL allocations are used only during initialization */
623 if (unlikely(gfp_mask & __GFP_WAIT))
624 return (void *)__get_free_page(gfp_mask);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000625
Michal Schmidt996dedb2013-09-05 22:13:09 +0200626 return netdev_alloc_frag(fp->rx_frag_size);
627 }
628
629 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000630}
631
Yuval Mintz99690852013-01-14 05:11:49 +0000632#ifdef CONFIG_INET
633static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
634{
635 const struct iphdr *iph = ip_hdr(skb);
636 struct tcphdr *th;
637
638 skb_set_transport_header(skb, sizeof(struct iphdr));
639 th = tcp_hdr(skb);
640
641 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
642 iph->saddr, iph->daddr, 0);
643}
644
645static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
646{
647 struct ipv6hdr *iph = ipv6_hdr(skb);
648 struct tcphdr *th;
649
650 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
651 th = tcp_hdr(skb);
652
653 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
654 &iph->saddr, &iph->daddr, 0);
655}
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000656
657static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
658 void (*gro_func)(struct bnx2x*, struct sk_buff*))
659{
660 skb_set_network_header(skb, 0);
661 gro_func(bp, skb);
662 tcp_gro_complete(skb);
663}
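/* A note on the two helpers above (an interpretation of the code, not a
 * statement of the GRO API): they seed th->check with the complemented
 * TCP pseudo-header checksum, the usual CHECKSUM_PARTIAL convention, so
 * that tcp_gro_complete() can hand the aggregated skb back to the stack
 * as a GSO packet.
 */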
Yuval Mintz99690852013-01-14 05:11:49 +0000664#endif
665
666static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
667 struct sk_buff *skb)
668{
669#ifdef CONFIG_INET
Yuval Mintzcbf1de72013-01-17 03:26:21 +0000670 if (skb_shinfo(skb)->gso_size) {
Yuval Mintz99690852013-01-14 05:11:49 +0000671 switch (be16_to_cpu(skb->protocol)) {
672 case ETH_P_IP:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000673 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
Yuval Mintz99690852013-01-14 05:11:49 +0000674 break;
675 case ETH_P_IPV6:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000676 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
Yuval Mintz99690852013-01-14 05:11:49 +0000677 break;
678 default:
Yuval Mintz2c2d06d2013-04-24 01:44:58 +0000679 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
Yuval Mintz99690852013-01-14 05:11:49 +0000680 be16_to_cpu(skb->protocol));
681 }
Yuval Mintz99690852013-01-14 05:11:49 +0000682 }
683#endif
684 napi_gro_receive(&fp->napi, skb);
685}
686
Eric Dumazet1191cb82012-04-27 21:39:21 +0000687static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
688 struct bnx2x_agg_info *tpa_info,
689 u16 pages,
690 struct eth_end_agg_rx_cqe *cqe,
691 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000692{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300693 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000694 u8 pad = tpa_info->placement_offset;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300695 u16 len = tpa_info->len_on_bd;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000696 struct sk_buff *skb = NULL;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000697 u8 *new_data, *data = rx_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300698 u8 old_tpa_state = tpa_info->tpa_state;
699
700 tpa_info->tpa_state = BNX2X_TPA_STOP;
701
 702	/* If there was an error during the handling of the TPA_START -
703 * drop this aggregation.
704 */
705 if (old_tpa_state == BNX2X_TPA_ERROR)
706 goto drop;
707
Eric Dumazete52fcb22011-11-14 06:05:34 +0000708 /* Try to allocate the new data */
Michal Schmidt996dedb2013-09-05 22:13:09 +0200709 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000710 /* Unmap skb in the pool anyway, as we are going to change
711 pool entry status to BNX2X_TPA_STOP even if new skb allocation
712 fails. */
713 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800714 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000715 if (likely(new_data))
Eric Dumazetd46d1322012-12-10 12:16:06 +0000716 skb = build_skb(data, fp->rx_frag_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000717
Eric Dumazete52fcb22011-11-14 06:05:34 +0000718 if (likely(skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000719#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800720 if (pad + len > fp->rx_buf_size) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000721 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800722 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000723 bnx2x_panic();
724 return;
725 }
726#endif
727
Eric Dumazete52fcb22011-11-14 06:05:34 +0000728 skb_reserve(skb, pad + NET_SKB_PAD);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000729 skb_put(skb, len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000730 skb->rxhash = tpa_info->rxhash;
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000731 skb->l4_rxhash = tpa_info->l4_rxhash;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000732
733 skb->protocol = eth_type_trans(skb, bp->dev);
734 skb->ip_summed = CHECKSUM_UNNECESSARY;
735
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000736 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
737 skb, cqe, cqe_idx)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300738 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
Patrick McHardy86a9bad2013-04-19 02:04:30 +0000739 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
Yuval Mintz99690852013-01-14 05:11:49 +0000740 bnx2x_gro_receive(bp, fp, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000741 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +0000742 DP(NETIF_MSG_RX_STATUS,
743 "Failed to allocate new pages - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000744 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000745 }
746
Eric Dumazete52fcb22011-11-14 06:05:34 +0000747 /* put new data in bin */
748 rx_buf->data = new_data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000749
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300750 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000751 }
Eric Dumazetd46d1322012-12-10 12:16:06 +0000752 bnx2x_frag_free(fp, new_data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300753drop:
754 /* drop the packet and keep the buffer in the bin */
755 DP(NETIF_MSG_RX_STATUS,
756 "Failed to allocate or map a new skb - dropping packet!\n");
Barak Witkowski15192a82012-06-19 07:48:28 +0000757 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000758}
759
Michal Schmidt996dedb2013-09-05 22:13:09 +0200760static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
761 u16 index, gfp_t gfp_mask)
Eric Dumazet1191cb82012-04-27 21:39:21 +0000762{
763 u8 *data;
764 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
765 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
766 dma_addr_t mapping;
767
Michal Schmidt996dedb2013-09-05 22:13:09 +0200768 data = bnx2x_frag_alloc(fp, gfp_mask);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000769 if (unlikely(data == NULL))
770 return -ENOMEM;
771
772 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
773 fp->rx_buf_size,
774 DMA_FROM_DEVICE);
775 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eric Dumazetd46d1322012-12-10 12:16:06 +0000776 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +0000777 BNX2X_ERR("Can't map rx data\n");
778 return -ENOMEM;
779 }
780
781 rx_buf->data = data;
782 dma_unmap_addr_set(rx_buf, mapping, mapping);
783
784 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
785 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
786
787 return 0;
788}
789
Barak Witkowski15192a82012-06-19 07:48:28 +0000790static
791void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
792 struct bnx2x_fastpath *fp,
793 struct bnx2x_eth_q_stats *qstats)
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000794{
Michal Schmidte4889212012-09-13 12:59:44 +0000795 /* Do nothing if no L4 csum validation was done.
796 * We do not check whether IP csum was validated. For IPv4 we assume
797 * that if the card got as far as validating the L4 csum, it also
798 * validated the IP csum. IPv6 has no IP csum.
799 */
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000800 if (cqe->fast_path_cqe.status_flags &
Michal Schmidte4889212012-09-13 12:59:44 +0000801 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000802 return;
803
Michal Schmidte4889212012-09-13 12:59:44 +0000804 /* If L4 validation was done, check if an error was found. */
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000805
806 if (cqe->fast_path_cqe.type_error_flags &
807 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
808 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
Barak Witkowski15192a82012-06-19 07:48:28 +0000809 qstats->hw_csum_err++;
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000810 else
811 skb->ip_summed = CHECKSUM_UNNECESSARY;
812}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000813
814int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
815{
816 struct bnx2x *bp = fp->bp;
817 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300818 u16 sw_comp_cons, sw_comp_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000819 int rx_pkt = 0;
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300820 union eth_rx_cqe *cqe;
821 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000822
823#ifdef BNX2X_STOP_ON_ERROR
824 if (unlikely(bp->panic))
825 return 0;
826#endif
827
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000828 bd_cons = fp->rx_bd_cons;
829 bd_prod = fp->rx_bd_prod;
830 bd_prod_fw = bd_prod;
831 sw_comp_cons = fp->rx_comp_cons;
832 sw_comp_prod = fp->rx_comp_prod;
833
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300834 comp_ring_cons = RCQ_BD(sw_comp_cons);
835 cqe = &fp->rx_comp_ring[comp_ring_cons];
836 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000837
838 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300839 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000840
Dmitry Kravkov75b29452013-06-19 01:36:05 +0300841 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000842 struct sw_rx_bd *rx_buf = NULL;
843 struct sk_buff *skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000844 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300845 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000846 u16 len, pad, queue;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000847 u8 *data;
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000848 bool l4_rxhash;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000849
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300850#ifdef BNX2X_STOP_ON_ERROR
851 if (unlikely(bp->panic))
852 return 0;
853#endif
854
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000855 bd_prod = RX_BD(bd_prod);
856 bd_cons = RX_BD(bd_cons);
857
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300858 cqe_fp_flags = cqe_fp->type_error_flags;
859 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000860
Merav Sicron51c1a582012-03-18 10:33:38 +0000861 DP(NETIF_MSG_RX_STATUS,
862 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
863 CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300864 cqe_fp_flags, cqe_fp->status_flags,
865 le32_to_cpu(cqe_fp->rss_hash_result),
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000866 le16_to_cpu(cqe_fp->vlan_tag),
867 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000868
869 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300870 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000871 bnx2x_sp_event(fp, cqe);
872 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000873 }
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000874
Eric Dumazete52fcb22011-11-14 06:05:34 +0000875 rx_buf = &fp->rx_buf_ring[bd_cons];
876 data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000877
Eric Dumazete52fcb22011-11-14 06:05:34 +0000878 if (!CQE_TYPE_FAST(cqe_fp_type)) {
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000879 struct bnx2x_agg_info *tpa_info;
880 u16 frag_size, pages;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300881#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000882 /* sanity check */
883 if (fp->disable_tpa &&
884 (CQE_TYPE_START(cqe_fp_type) ||
885 CQE_TYPE_STOP(cqe_fp_type)))
Merav Sicron51c1a582012-03-18 10:33:38 +0000886 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
Eric Dumazete52fcb22011-11-14 06:05:34 +0000887 CQE_TYPE(cqe_fp_type));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300888#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000889
Eric Dumazete52fcb22011-11-14 06:05:34 +0000890 if (CQE_TYPE_START(cqe_fp_type)) {
891 u16 queue = cqe_fp->queue_index;
892 DP(NETIF_MSG_RX_STATUS,
893 "calling tpa_start on queue %d\n",
894 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000895
Eric Dumazete52fcb22011-11-14 06:05:34 +0000896 bnx2x_tpa_start(fp, queue,
897 bd_cons, bd_prod,
898 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000899
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000900 goto next_rx;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000901 }
902 queue = cqe->end_agg_cqe.queue_index;
903 tpa_info = &fp->tpa_info[queue];
904 DP(NETIF_MSG_RX_STATUS,
905 "calling tpa_stop on queue %d\n",
906 queue);
907
908 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
909 tpa_info->len_on_bd;
910
911 if (fp->mode == TPA_MODE_GRO)
912 pages = (frag_size + tpa_info->full_page - 1) /
913 tpa_info->full_page;
914 else
915 pages = SGE_PAGE_ALIGN(frag_size) >>
916 SGE_PAGE_SHIFT;
917
918 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
919 &cqe->end_agg_cqe, comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000920#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000921 if (bp->panic)
922 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000923#endif
924
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000925 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
926 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000927 }
928 /* non TPA */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000929 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000930 pad = cqe_fp->placement_offset;
931 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000932 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +0000933 pad + RX_COPY_THRESH,
934 DMA_FROM_DEVICE);
935 pad += NET_SKB_PAD;
936 prefetch(data + pad); /* speedup eth_type_trans() */
937 /* is this an error packet? */
938 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000939 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000940 "ERROR flags %x rx packet %u\n",
941 cqe_fp_flags, sw_comp_cons);
Barak Witkowski15192a82012-06-19 07:48:28 +0000942 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000943 goto reuse_rx;
944 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000945
Eric Dumazete52fcb22011-11-14 06:05:34 +0000946 /* Since we don't have a jumbo ring
947 * copy small packets if mtu > 1500
948 */
949 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
950 (len <= RX_COPY_THRESH)) {
951 skb = netdev_alloc_skb_ip_align(bp->dev, len);
952 if (skb == NULL) {
Merav Sicron51c1a582012-03-18 10:33:38 +0000953 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000954 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +0000955 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000956 goto reuse_rx;
957 }
Eric Dumazete52fcb22011-11-14 06:05:34 +0000958 memcpy(skb->data, data + pad, len);
959 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
960 } else {
Michal Schmidt996dedb2013-09-05 22:13:09 +0200961 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
962 GFP_ATOMIC) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000963 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000964 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800965 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000966 DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +0000967 skb = build_skb(data, fp->rx_frag_size);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000968 if (unlikely(!skb)) {
Eric Dumazetd46d1322012-12-10 12:16:06 +0000969 bnx2x_frag_free(fp, data);
Barak Witkowski15192a82012-06-19 07:48:28 +0000970 bnx2x_fp_qstats(bp, fp)->
971 rx_skb_alloc_failed++;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000972 goto next_rx;
973 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000974 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000975 } else {
Merav Sicron51c1a582012-03-18 10:33:38 +0000976 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
977 "ERROR packet dropped because of alloc failure\n");
Barak Witkowski15192a82012-06-19 07:48:28 +0000978 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000979reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000980 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000981 goto next_rx;
982 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000983 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000984
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000985 skb_put(skb, len);
986 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000987
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000988		/* Set Toeplitz hash for a non-LRO skb */
Eric Dumazeta334b5f2012-07-09 06:02:24 +0000989 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
990 skb->l4_rxhash = l4_rxhash;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000991
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000992 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000993
Eric Dumazetd6cb3e42012-06-12 23:50:04 +0000994 if (bp->dev->features & NETIF_F_RXCSUM)
Barak Witkowski15192a82012-06-19 07:48:28 +0000995 bnx2x_csum_validate(skb, cqe, fp,
996 bnx2x_fp_qstats(bp, fp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000997
Dmitry Kravkovf233caf2011-11-13 04:34:22 +0000998 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000999
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001000 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1001 PARSING_FLAGS_VLAN)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001002 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001003 le16_to_cpu(cqe_fp->vlan_tag));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001004
Eliezer Tamir8b80cda2013-07-10 17:13:26 +03001005 skb_mark_napi_id(skb, &fp->napi);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001006
1007 if (bnx2x_fp_ll_polling(fp))
1008 netif_receive_skb(skb);
1009 else
1010 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001011next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +00001012 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001013
1014 bd_cons = NEXT_RX_IDX(bd_cons);
1015 bd_prod = NEXT_RX_IDX(bd_prod);
1016 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1017 rx_pkt++;
1018next_cqe:
1019 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1020 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1021
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001022 /* mark CQE as free */
1023 BNX2X_SEED_CQE(cqe_fp);
1024
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001025 if (rx_pkt == budget)
1026 break;
Dmitry Kravkov75b29452013-06-19 01:36:05 +03001027
1028 comp_ring_cons = RCQ_BD(sw_comp_cons);
1029 cqe = &fp->rx_comp_ring[comp_ring_cons];
1030 cqe_fp = &cqe->fast_path_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001031 } /* while */
1032
1033 fp->rx_bd_cons = bd_cons;
1034 fp->rx_bd_prod = bd_prod_fw;
1035 fp->rx_comp_cons = sw_comp_cons;
1036 fp->rx_comp_prod = sw_comp_prod;
1037
1038 /* Update producers */
1039 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1040 fp->rx_sge_prod);
1041
1042 fp->rx_pkt += rx_pkt;
1043 fp->rx_calls++;
1044
1045 return rx_pkt;
1046}
1047
1048static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1049{
1050 struct bnx2x_fastpath *fp = fp_cookie;
1051 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001052 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001053
Merav Sicron51c1a582012-03-18 10:33:38 +00001054 DP(NETIF_MSG_INTR,
1055 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001056 fp->index, fp->fw_sb_id, fp->igu_sb_id);
Yuval Mintzecf01c22013-04-22 02:53:03 +00001057
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001058 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001059
1060#ifdef BNX2X_STOP_ON_ERROR
1061 if (unlikely(bp->panic))
1062 return IRQ_HANDLED;
1063#endif
1064
1065 /* Handle Rx and Tx according to MSI-X vector */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001066 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00001067 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001068
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001069 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001070 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1071
1072 return IRQ_HANDLED;
1073}
1074
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001075/* HW Lock for shared dual port PHYs */
1076void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1077{
1078 mutex_lock(&bp->port.phy_mutex);
1079
Yaniv Rosner8203c4b2012-11-27 03:46:33 +00001080 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001081}
1082
1083void bnx2x_release_phy_lock(struct bnx2x *bp)
1084{
Yaniv Rosner8203c4b2012-11-27 03:46:33 +00001085 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001086
1087 mutex_unlock(&bp->port.phy_mutex);
1088}
1089
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001090/* calculates MF speed according to current linespeed and MF configuration */
1091u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1092{
1093 u16 line_speed = bp->link_vars.line_speed;
1094 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001095 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1096 bp->mf_config[BP_VN(bp)]);
1097
1098 /* Calculate the current MAX line speed limit for the MF
1099 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001100 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001101 if (IS_MF_SI(bp))
1102 line_speed = (line_speed * maxCfg) / 100;
1103 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001104 u16 vn_max_rate = maxCfg * 100;
1105
1106 if (vn_max_rate < line_speed)
1107 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +00001108 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08001109 }
1110
1111 return line_speed;
1112}
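/* Worked example (hypothetical values): with line_speed = 10000 Mbps and
 * maxCfg = 25, an SI function reports 10000 * 25 / 100 = 2500 Mbps, while
 * an SD function is capped at vn_max_rate = 25 * 100 = 2500 Mbps.
 */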
1113
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001114/**
1115 * bnx2x_fill_report_data - fill link report data to report
1116 *
1117 * @bp: driver handle
1118 * @data: link state to update
1119 *
 1120 * It uses non-atomic bit operations because it is called under the mutex.
1121 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001122static void bnx2x_fill_report_data(struct bnx2x *bp,
1123 struct bnx2x_link_report_data *data)
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001124{
1125 u16 line_speed = bnx2x_get_mf_speed(bp);
1126
1127 memset(data, 0, sizeof(*data));
1128
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001129 /* Fill the report data: effective line speed */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001130 data->line_speed = line_speed;
1131
1132 /* Link is down */
1133 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1134 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1135 &data->link_report_flags);
1136
1137 /* Full DUPLEX */
1138 if (bp->link_vars.duplex == DUPLEX_FULL)
1139 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1140
1141 /* Rx Flow Control is ON */
1142 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1143 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1144
1145 /* Tx Flow Control is ON */
1146 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1147 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1148}
1149
1150/**
1151 * bnx2x_link_report - report link status to OS.
1152 *
1153 * @bp: driver handle
1154 *
1155 * Calls the __bnx2x_link_report() under the same locking scheme
1156 * as a link/PHY state managing code to ensure a consistent link
1157 * reporting.
1158 */
1159
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001160void bnx2x_link_report(struct bnx2x *bp)
1161{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001162 bnx2x_acquire_phy_lock(bp);
1163 __bnx2x_link_report(bp);
1164 bnx2x_release_phy_lock(bp);
1165}
1166
1167/**
1168 * __bnx2x_link_report - report link status to OS.
1169 *
1170 * @bp: driver handle
1171 *
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001172 * Non-atomic implementation.
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001173 * Should be called under the phy_lock.
1174 */
1175void __bnx2x_link_report(struct bnx2x *bp)
1176{
1177 struct bnx2x_link_report_data cur_data;
1178
1179 /* reread mf_cfg */
Ariel Eliorad5afc82013-01-01 05:22:26 +00001180 if (IS_PF(bp) && !CHIP_IS_E1(bp))
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001181 bnx2x_read_mf_cfg(bp);
1182
1183 /* Read the current link report info */
1184 bnx2x_fill_report_data(bp, &cur_data);
1185
1186 /* Don't report link down or exactly the same link status twice */
1187 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1188 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1189 &bp->last_reported_link.link_report_flags) &&
1190 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1191 &cur_data.link_report_flags)))
1192 return;
1193
1194 bp->link_cnt++;
1195
 1196	/* We are going to report new link parameters now -
1197 * remember the current data for the next time.
1198 */
1199 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1200
1201 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1202 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001203 netif_carrier_off(bp->dev);
1204 netdev_err(bp->dev, "NIC Link is Down\n");
1205 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001206 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001207 const char *duplex;
1208 const char *flow;
1209
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001210 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001211
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001212 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1213 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001214 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001215 else
Joe Perches94f05b02011-08-14 12:16:20 +00001216 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001217
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001218		/* Handle the FC at the end so that only these flags could
 1219		 * possibly be set. This way we may easily check whether any FC
 1220		 * is enabled.
1221 */
1222 if (cur_data.link_report_flags) {
1223 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1224 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001225 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1226 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +00001227 flow = "ON - receive & transmit";
1228 else
1229 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001230 } else {
Joe Perches94f05b02011-08-14 12:16:20 +00001231 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001232 }
Joe Perches94f05b02011-08-14 12:16:20 +00001233 } else {
1234 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001235 }
Joe Perches94f05b02011-08-14 12:16:20 +00001236 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1237 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001238 }
1239}
1240
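/* Chain the Rx SGE pages into a ring: the reserved "next page" element at
 * the end of each SGE page gets the DMA address of the following page,
 * with the last page wrapping back to the first.
 */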
Eric Dumazet1191cb82012-04-27 21:39:21 +00001241static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1242{
1243 int i;
1244
1245 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1246 struct eth_rx_sge *sge;
1247
1248 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1249 sge->addr_hi =
1250 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1251 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1252
1253 sge->addr_lo =
1254 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1255 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1256 }
1257}
1258
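/* Free the data buffers of the first 'last' TPA bins of a fastpath.  A bin
 * still in BNX2X_TPA_START state owns a DMA mapping for its buffer, so it
 * is unmapped before the buffer is handed back to bnx2x_frag_free().
 */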
1259static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1260 struct bnx2x_fastpath *fp, int last)
1261{
1262 int i;
1263
1264 for (i = 0; i < last; i++) {
1265 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1266 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1267 u8 *data = first_buf->data;
1268
1269 if (data == NULL) {
1270 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1271 continue;
1272 }
1273 if (tpa_info->tpa_state == BNX2X_TPA_START)
1274 dma_unmap_single(&bp->pdev->dev,
1275 dma_unmap_addr(first_buf, mapping),
1276 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazetd46d1322012-12-10 12:16:06 +00001277 bnx2x_frag_free(fp, data);
Eric Dumazet1191cb82012-04-27 21:39:21 +00001278 first_buf->data = NULL;
1279 }
1280}
1281
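/* Reset the BD consumer and publish the Rx producers of the CNIC
 * (FCoE/iSCSI) rings to the chip; see the warning below about when this
 * may be done.
 */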
Merav Sicron55c11942012-11-07 00:45:48 +00001282void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1283{
1284 int j;
1285
1286 for_each_rx_queue_cnic(bp, j) {
1287 struct bnx2x_fastpath *fp = &bp->fp[j];
1288
1289 fp->rx_bd_cons = 0;
1290
1291 /* Activate BD ring */
 1292		/* Warning!
 1293		 * this will generate an interrupt (to the TSTORM);
 1294		 * it must only be done after the chip is initialized
1295 */
1296 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1297 fp->rx_sge_prod);
1298 }
1299}
1300
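/* Prepare the Rx rings of all ETH queues: allocate the per-queue TPA
 * buffer pool and the SGE ring (TPA is disabled for a queue if any
 * allocation fails), then publish the initial producers to the chip.
 */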
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001301void bnx2x_init_rx_rings(struct bnx2x *bp)
1302{
1303 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001304 u16 ring_prod;
1305 int i, j;
1306
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001307 /* Allocate TPA resources */
Merav Sicron55c11942012-11-07 00:45:48 +00001308 for_each_eth_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001309 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001310
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001311 DP(NETIF_MSG_IFUP,
1312 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1313
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001314 if (!fp->disable_tpa) {
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001315 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001316 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001317 struct bnx2x_agg_info *tpa_info =
1318 &fp->tpa_info[i];
1319 struct sw_rx_bd *first_buf =
1320 &tpa_info->first_buf;
1321
Michal Schmidt996dedb2013-09-05 22:13:09 +02001322 first_buf->data =
1323 bnx2x_frag_alloc(fp, GFP_KERNEL);
Eric Dumazete52fcb22011-11-14 06:05:34 +00001324 if (!first_buf->data) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001325 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1326 j);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001327 bnx2x_free_tpa_pool(bp, fp, i);
1328 fp->disable_tpa = 1;
1329 break;
1330 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001331 dma_unmap_addr_set(first_buf, mapping, 0);
1332 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001333 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001334
1335 /* "next page" elements initialization */
1336 bnx2x_set_next_page_sgl(fp);
1337
1338 /* set SGEs bit mask */
1339 bnx2x_init_sge_ring_bit_mask(fp);
1340
1341 /* Allocate SGEs and initialize the ring elements */
1342 for (i = 0, ring_prod = 0;
1343 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1344
Michal Schmidt996dedb2013-09-05 22:13:09 +02001345 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1346 GFP_KERNEL) < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001347 BNX2X_ERR("was only able to allocate %d rx sges\n",
1348 i);
1349 BNX2X_ERR("disabling TPA for queue[%d]\n",
1350 j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001351 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001352 bnx2x_free_rx_sge_range(bp, fp,
1353 ring_prod);
1354 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001355 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001356 fp->disable_tpa = 1;
1357 ring_prod = 0;
1358 break;
1359 }
1360 ring_prod = NEXT_SGE_IDX(ring_prod);
1361 }
1362
1363 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001364 }
1365 }
1366
Merav Sicron55c11942012-11-07 00:45:48 +00001367 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001368 struct bnx2x_fastpath *fp = &bp->fp[j];
1369
1370 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001371
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001372 /* Activate BD ring */
 1373		/* Warning!
 1374		 * this will generate an interrupt (to the TSTORM);
 1375		 * it must only be done after the chip is initialized
1376 */
1377 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1378 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001379
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001380 if (j != 0)
1381 continue;
1382
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001383 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001384 REG_WR(bp, BAR_USTRORM_INTMEM +
1385 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1386 U64_LO(fp->rx_comp_mapping));
1387 REG_WR(bp, BAR_USTRORM_INTMEM +
1388 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1389 U64_HI(fp->rx_comp_mapping));
1390 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001391 }
1392}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001393
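/* Free every packet still pending on each CoS Tx ring of a fastpath and
 * reset the corresponding netdev/BQL Tx queue accounting.
 */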
Merav Sicron55c11942012-11-07 00:45:48 +00001394static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1395{
1396 u8 cos;
1397 struct bnx2x *bp = fp->bp;
1398
1399 for_each_cos_in_tx_queue(fp, cos) {
1400 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1401 unsigned pkts_compl = 0, bytes_compl = 0;
1402
1403 u16 sw_prod = txdata->tx_pkt_prod;
1404 u16 sw_cons = txdata->tx_pkt_cons;
1405
1406 while (sw_cons != sw_prod) {
1407 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1408 &pkts_compl, &bytes_compl);
1409 sw_cons++;
1410 }
1411
1412 netdev_tx_reset_queue(
1413 netdev_get_tx_queue(bp->dev,
1414 txdata->txq_index));
1415 }
1416}
1417
1418static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1419{
1420 int i;
1421
1422 for_each_tx_queue_cnic(bp, i) {
1423 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1424 }
1425}
1426
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001427static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1428{
1429 int i;
1430
Merav Sicron55c11942012-11-07 00:45:48 +00001431 for_each_eth_queue(bp, i) {
1432 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001433 }
1434}
1435
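/* Unmap and free every Rx buffer of a fastpath BD ring (no-op if the ring
 * was never allocated).
 */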
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001436static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1437{
1438 struct bnx2x *bp = fp->bp;
1439 int i;
1440
1441 /* ring wasn't allocated */
1442 if (fp->rx_buf_ring == NULL)
1443 return;
1444
1445 for (i = 0; i < NUM_RX_BD; i++) {
1446 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001447 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001448
Eric Dumazete52fcb22011-11-14 06:05:34 +00001449 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001450 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001451 dma_unmap_single(&bp->pdev->dev,
1452 dma_unmap_addr(rx_buf, mapping),
1453 fp->rx_buf_size, DMA_FROM_DEVICE);
1454
Eric Dumazete52fcb22011-11-14 06:05:34 +00001455 rx_buf->data = NULL;
Eric Dumazetd46d1322012-12-10 12:16:06 +00001456 bnx2x_frag_free(fp, data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001457 }
1458}
1459
Merav Sicron55c11942012-11-07 00:45:48 +00001460static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1461{
1462 int j;
1463
1464 for_each_rx_queue_cnic(bp, j) {
1465 bnx2x_free_rx_bds(&bp->fp[j]);
1466 }
1467}
1468
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001469static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1470{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001471 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001472
Merav Sicron55c11942012-11-07 00:45:48 +00001473 for_each_eth_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001474 struct bnx2x_fastpath *fp = &bp->fp[j];
1475
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001476 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001477
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001478 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001479 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001480 }
1481}
1482
Merav Sicron55c11942012-11-07 00:45:48 +00001483void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1484{
1485 bnx2x_free_tx_skbs_cnic(bp);
1486 bnx2x_free_rx_skbs_cnic(bp);
1487}
1488
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001489void bnx2x_free_skbs(struct bnx2x *bp)
1490{
1491 bnx2x_free_tx_skbs(bp);
1492 bnx2x_free_rx_skbs(bp);
1493}
1494
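/* Update the MAX bandwidth field of this function's MF configuration and,
 * if the value actually changed, report it to the MCP via SET_MF_BW.
 */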
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001495void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1496{
1497 /* load old values */
1498 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1499
1500 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1501 /* leave all but MAX value */
1502 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1503
1504 /* set new MAX value */
1505 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1506 & FUNC_MF_CFG_MAX_BW_MASK;
1507
1508 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1509 }
1510}
1511
Dmitry Kravkovca924292011-06-14 01:33:08 +00001512/**
1513 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1514 *
1515 * @bp: driver handle
1516 * @nvecs: number of vectors to be released
1517 */
1518static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001519{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001520 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001521
Dmitry Kravkovca924292011-06-14 01:33:08 +00001522 if (nvecs == offset)
1523 return;
Ariel Eliorad5afc82013-01-01 05:22:26 +00001524
1525 /* VFs don't have a default SB */
1526 if (IS_PF(bp)) {
1527 free_irq(bp->msix_table[offset].vector, bp->dev);
1528 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1529 bp->msix_table[offset].vector);
1530 offset++;
1531 }
Merav Sicron55c11942012-11-07 00:45:48 +00001532
1533 if (CNIC_SUPPORT(bp)) {
1534 if (nvecs == offset)
1535 return;
1536 offset++;
1537 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001538
Dmitry Kravkovca924292011-06-14 01:33:08 +00001539 for_each_eth_queue(bp, i) {
1540 if (nvecs == offset)
1541 return;
Merav Sicron51c1a582012-03-18 10:33:38 +00001542 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1543 i, bp->msix_table[offset].vector);
Dmitry Kravkovca924292011-06-14 01:33:08 +00001544
1545 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001546 }
1547}
1548
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001549void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001550{
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001551 if (bp->flags & USING_MSIX_FLAG &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00001552 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1553 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1554
1555 /* vfs don't have a default status block */
1556 if (IS_PF(bp))
1557 nvecs++;
1558
1559 bnx2x_free_msix_irqs(bp, nvecs);
1560 } else {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001561 free_irq(bp->dev->irq, bp->dev);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001562 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001563}
1564
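/* Build the MSI-X table and try to enable MSI-X.  The vector layout is
 * [slowpath (PF only)] [CNIC, if supported] [one vector per ETH queue];
 * e.g. (illustrative) a PF with CNIC support and 8 ETH queues asks for
 * 1 + 1 + 8 = 10 vectors.  If fewer vectors are granted, the driver retries
 * with fewer queues, then with a single vector, and finally gives up so
 * the caller can fall back to MSI/INTx.
 */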
Merav Sicron0e8d2ec2012-06-19 07:48:30 +00001565int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001566{
Ariel Elior1ab44342013-01-01 05:22:23 +00001567 int msix_vec = 0, i, rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001568
Ariel Elior1ab44342013-01-01 05:22:23 +00001569 /* VFs don't have a default status block */
1570 if (IS_PF(bp)) {
1571 bp->msix_table[msix_vec].entry = msix_vec;
1572 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1573 bp->msix_table[0].entry);
1574 msix_vec++;
1575 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001576
Merav Sicron55c11942012-11-07 00:45:48 +00001577 /* Cnic requires an msix vector for itself */
1578 if (CNIC_SUPPORT(bp)) {
1579 bp->msix_table[msix_vec].entry = msix_vec;
1580 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1581 msix_vec, bp->msix_table[msix_vec].entry);
1582 msix_vec++;
1583 }
1584
Ariel Elior6383c0b2011-07-14 08:31:57 +00001585 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001586 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001587 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001588 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1589 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001590 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001591 }
1592
Ariel Elior1ab44342013-01-01 05:22:23 +00001593 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1594 msix_vec);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001595
Ariel Elior1ab44342013-01-01 05:22:23 +00001596 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001597
1598 /*
1599 * reconfigure number of tx/rx queues according to available
1600 * MSI-X vectors
1601 */
Merav Sicron55c11942012-11-07 00:45:48 +00001602 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001603		/* how many fewer vectors will we have? */
Ariel Elior1ab44342013-01-01 05:22:23 +00001604 int diff = msix_vec - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001605
Merav Sicron51c1a582012-03-18 10:33:38 +00001606 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001607
1608 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1609
1610 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001611 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1612 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001613 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001614 /*
1615 * decrease number of queues by number of unallocated entries
1616 */
Merav Sicron55c11942012-11-07 00:45:48 +00001617 bp->num_ethernet_queues -= diff;
1618 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001619
Merav Sicron51c1a582012-03-18 10:33:38 +00001620 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001621 bp->num_queues);
1622 } else if (rc > 0) {
1623 /* Get by with single vector */
1624 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1625 if (rc) {
1626 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1627 rc);
1628 goto no_msix;
1629 }
1630
1631 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1632 bp->flags |= USING_SINGLE_MSIX_FLAG;
1633
Merav Sicron55c11942012-11-07 00:45:48 +00001634 BNX2X_DEV_INFO("set number of queues to 1\n");
1635 bp->num_ethernet_queues = 1;
1636 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001637 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001638 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001639 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001640 }
1641
1642 bp->flags |= USING_MSIX_FLAG;
1643
1644 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001645
1646no_msix:
1647 /* fall to INTx if not enough memory */
1648 if (rc == -ENOMEM)
1649 bp->flags |= DISABLE_MSI_FLAG;
1650
1651 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001652}
1653
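/* request_irq() for each enabled MSI-X vector: the slowpath vector first
 * (PF only), then - skipping the slot reserved for CNIC - one vector per
 * ETH fastpath.  Already-requested vectors are released on failure.
 */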
1654static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1655{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001656 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001657
Ariel Eliorad5afc82013-01-01 05:22:26 +00001658 /* no default status block for vf */
1659 if (IS_PF(bp)) {
1660 rc = request_irq(bp->msix_table[offset++].vector,
1661 bnx2x_msix_sp_int, 0,
1662 bp->dev->name, bp->dev);
1663 if (rc) {
1664 BNX2X_ERR("request sp irq failed\n");
1665 return -EBUSY;
1666 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001667 }
1668
Merav Sicron55c11942012-11-07 00:45:48 +00001669 if (CNIC_SUPPORT(bp))
1670 offset++;
1671
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001672 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001673 struct bnx2x_fastpath *fp = &bp->fp[i];
1674 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1675 bp->dev->name, i);
1676
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001677 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001678 bnx2x_msix_fp_int, 0, fp->name, fp);
1679 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001680 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1681 bp->msix_table[offset].vector, rc);
1682 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001683 return -EBUSY;
1684 }
1685
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001686 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001687 }
1688
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001689 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00001690 if (IS_PF(bp)) {
1691 offset = 1 + CNIC_SUPPORT(bp);
1692 netdev_info(bp->dev,
1693 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1694 bp->msix_table[0].vector,
1695 0, bp->msix_table[offset].vector,
1696 i - 1, bp->msix_table[offset + i - 1].vector);
1697 } else {
1698 offset = CNIC_SUPPORT(bp);
1699 netdev_info(bp->dev,
1700 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1701 0, bp->msix_table[offset].vector,
1702 i - 1, bp->msix_table[offset + i - 1].vector);
1703 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001704 return 0;
1705}
1706
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001707int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001708{
1709 int rc;
1710
1711 rc = pci_enable_msi(bp->pdev);
1712 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001713 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001714 return -1;
1715 }
1716 bp->flags |= USING_MSI_FLAG;
1717
1718 return 0;
1719}
1720
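/* Request the single interrupt used in INTx, MSI or single-MSI-X mode;
 * IRQF_SHARED is needed only for legacy INTx.
 */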
1721static int bnx2x_req_irq(struct bnx2x *bp)
1722{
1723 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001724 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001725
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001726 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001727 flags = 0;
1728 else
1729 flags = IRQF_SHARED;
1730
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001731 if (bp->flags & USING_MSIX_FLAG)
1732 irq = bp->msix_table[0].vector;
1733 else
1734 irq = bp->pdev->irq;
1735
1736 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001737}
1738
Yuval Mintzc957d092013-06-25 08:50:11 +03001739static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001740{
1741 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001742 if (bp->flags & USING_MSIX_FLAG &&
1743 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001744 rc = bnx2x_req_msix_irqs(bp);
1745 if (rc)
1746 return rc;
1747 } else {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001748 rc = bnx2x_req_irq(bp);
1749 if (rc) {
1750 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1751 return rc;
1752 }
1753 if (bp->flags & USING_MSI_FLAG) {
1754 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001755 netdev_info(bp->dev, "using MSI IRQ %d\n",
1756 bp->dev->irq);
1757 }
1758 if (bp->flags & USING_MSIX_FLAG) {
1759 bp->dev->irq = bp->msix_table[0].vector;
1760 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1761 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001762 }
1763 }
1764
1765 return 0;
1766}
1767
Merav Sicron55c11942012-11-07 00:45:48 +00001768static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1769{
1770 int i;
1771
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001772 for_each_rx_queue_cnic(bp, i) {
1773 bnx2x_fp_init_lock(&bp->fp[i]);
Merav Sicron55c11942012-11-07 00:45:48 +00001774 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001775 }
Merav Sicron55c11942012-11-07 00:45:48 +00001776}
1777
Eric Dumazet1191cb82012-04-27 21:39:21 +00001778static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001779{
1780 int i;
1781
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001782 for_each_eth_queue(bp, i) {
1783 bnx2x_fp_init_lock(&bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001784 napi_enable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001785 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001786}
1787
Merav Sicron55c11942012-11-07 00:45:48 +00001788static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1789{
1790 int i;
1791
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001792 local_bh_disable();
1793 for_each_rx_queue_cnic(bp, i) {
Merav Sicron55c11942012-11-07 00:45:48 +00001794 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001795 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1796 mdelay(1);
1797 }
1798 local_bh_enable();
Merav Sicron55c11942012-11-07 00:45:48 +00001799}
1800
Eric Dumazet1191cb82012-04-27 21:39:21 +00001801static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001802{
1803 int i;
1804
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001805 local_bh_disable();
1806 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001807 napi_disable(&bnx2x_fp(bp, i, napi));
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03001808 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1809 mdelay(1);
1810 }
1811 local_bh_enable();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001812}
1813
1814void bnx2x_netif_start(struct bnx2x *bp)
1815{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001816 if (netif_running(bp->dev)) {
1817 bnx2x_napi_enable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001818 if (CNIC_LOADED(bp))
1819 bnx2x_napi_enable_cnic(bp);
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001820 bnx2x_int_enable(bp);
1821 if (bp->state == BNX2X_STATE_OPEN)
1822 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001823 }
1824}
1825
1826void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1827{
1828 bnx2x_int_disable_sync(bp, disable_hw);
1829 bnx2x_napi_disable(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00001830 if (CNIC_LOADED(bp))
1831 bnx2x_napi_disable_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001832}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001833
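/* ndo_select_queue: steer FCoE/FIP frames (looking past an optional VLAN
 * tag) to the dedicated FCoE Tx ring; everything else is hashed onto the
 * regular ETH Tx queues.
 */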
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001834u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1835{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001836 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001837
Merav Sicron55c11942012-11-07 00:45:48 +00001838 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001839 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1840 u16 ether_type = ntohs(hdr->h_proto);
1841
1842 /* Skip VLAN tag if present */
1843 if (ether_type == ETH_P_8021Q) {
1844 struct vlan_ethhdr *vhdr =
1845 (struct vlan_ethhdr *)skb->data;
1846
1847 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1848 }
1849
1850 /* If ethertype is FCoE or FIP - use FCoE ring */
1851 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001852 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001853 }
Merav Sicron55c11942012-11-07 00:45:48 +00001854
David S. Miller823dcd22011-08-20 10:39:12 -07001855 /* select a non-FCoE queue */
Eric Dumazetada7c192013-05-31 14:32:55 +00001856 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001857}
1858
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001859void bnx2x_set_num_queues(struct bnx2x *bp)
1860{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001861 /* RSS queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001862 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001863
Barak Witkowskia3348722012-04-23 03:04:46 +00001864 /* override in STORAGE SD modes */
1865 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00001866 bp->num_ethernet_queues = 1;
1867
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001868 /* Add special queues */
Merav Sicron55c11942012-11-07 00:45:48 +00001869 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1870 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
Merav Sicron65565882012-06-19 07:48:26 +00001871
1872 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001873}
1874
David S. Miller823dcd22011-08-20 10:39:12 -07001875/**
1876 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1877 *
1878 * @bp: Driver handle
1879 *
 1880 * We currently support at most 16 Tx queues for each CoS, thus we will
 1881 * allocate a multiple of 16 for ETH L2 rings, according to the value of
 1882 * bp->max_cos.
1883 *
1884 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1885 * index after all ETH L2 indices.
1886 *
 1887 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1888 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001889 * 16..31,...) with indices that are not coupled with any real Tx queue.
David S. Miller823dcd22011-08-20 10:39:12 -07001890 *
1891 * The proper configuration of skb->queue_mapping is handled by
1892 * bnx2x_select_queue() and __skb_tx_hash().
1893 *
1894 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1895 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1896 */
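/* Illustrative example: with 8 ETH queues and bp->max_cos == 3, tx below
 * is 24, plus one more Tx queue (and one Rx queue) for FCoE when CNIC is
 * included and FCoE is not disabled.
 */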
Merav Sicron55c11942012-11-07 00:45:48 +00001897static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001898{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001899 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001900
Merav Sicron65565882012-06-19 07:48:26 +00001901 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
Merav Sicron55c11942012-11-07 00:45:48 +00001902 rx = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001903
 1904	/* account for fcoe queue */
Merav Sicron55c11942012-11-07 00:45:48 +00001905 if (include_cnic && !NO_FCOE(bp)) {
1906 rx++;
1907 tx++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001908 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00001909
1910 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1911 if (rc) {
1912 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1913 return rc;
1914 }
1915 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1916 if (rc) {
1917 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1918 return rc;
1919 }
1920
Merav Sicron51c1a582012-03-18 10:33:38 +00001921 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001922 tx, rx);
1923
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001924 return rc;
1925}
1926
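/* Compute rx_buf_size for every queue from the MTU (a fixed mini-jumbo
 * MTU on the FCoE ring) plus alignment and header overhead.  If the
 * result plus NET_SKB_PAD still fits in a page, page-frag allocation is
 * used; otherwise rx_frag_size is left 0 and the allocation helpers fall
 * back to regular buffers.
 */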
Eric Dumazet1191cb82012-04-27 21:39:21 +00001927static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001928{
1929 int i;
1930
1931 for_each_queue(bp, i) {
1932 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001933 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001934
1935 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1936 if (IS_FCOE_IDX(i))
1937 /*
 1938			 * Although there are no IP frames expected to arrive on
1939 * this ring we still want to add an
1940 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1941 * overrun attack.
1942 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001943 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001944 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001945 mtu = bp->dev->mtu;
1946 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1947 IP_HEADER_ALIGNMENT_PADDING +
1948 ETH_OVREHEAD +
1949 mtu +
1950 BNX2X_FW_RX_ALIGN_END;
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001951 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
Eric Dumazetd46d1322012-12-10 12:16:06 +00001952 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1953 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1954 else
1955 fp->rx_frag_size = 0;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001956 }
1957}
1958
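/* Fill the initial RSS indirection table, spreading the entries over the
 * ETH queues' client IDs (ethtool_rxfh_indir_default(i, n) is simply
 * i % n), and push the configuration to the device.
 */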
Ariel Elior60cad4e2013-09-04 14:09:22 +03001959static int bnx2x_init_rss(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001960{
1961 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001962 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1963
Yuval Mintz16a5fd92013-06-02 00:06:18 +00001964 /* Prepare the initial contents for the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001965 * enabled
1966 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001967 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1968 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001969 bp->fp->cl_id +
1970 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001971
1972 /*
1973 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1974	 * per-port, so if explicit configuration is needed, do it only
1975 * for a PMF.
1976 *
1977 * For 57712 and newer on the other hand it's a per-function
1978 * configuration.
1979 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001980 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001981}
1982
Ariel Elior60cad4e2013-09-04 14:09:22 +03001983int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1984 bool config_hash, bool enable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001985{
Yuval Mintz3b603062012-03-18 10:33:39 +00001986 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001987
1988 /* Although RSS is meaningless when there is a single HW queue we
1989 * still need it enabled in order to have HW Rx hash generated.
1990 *
1991 * if (!is_eth_multi(bp))
1992 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1993 */
1994
Dmitry Kravkov96305232012-04-03 18:41:30 +00001995 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001996
1997 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1998
Ariel Elior60cad4e2013-09-04 14:09:22 +03001999 if (enable) {
2000 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002001
Ariel Elior60cad4e2013-09-04 14:09:22 +03002002 /* RSS configuration */
2003 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2004 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2005 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2006 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2007 if (rss_obj->udp_rss_v4)
2008 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2009 if (rss_obj->udp_rss_v6)
2010 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2011 } else {
2012 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2013 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002014
Dmitry Kravkov96305232012-04-03 18:41:30 +00002015 /* Hash bits */
2016 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002017
Merav Sicron5d317c6a2012-06-19 07:48:24 +00002018 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002019
Dmitry Kravkov96305232012-04-03 18:41:30 +00002020 if (config_hash) {
2021 /* RSS keys */
Ariel Elior60cad4e2013-09-04 14:09:22 +03002022 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
Dmitry Kravkov96305232012-04-03 18:41:30 +00002023 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002024 }
2025
Ariel Elior60cad4e2013-09-04 14:09:22 +03002026 if (IS_PF(bp))
2027 return bnx2x_config_rss(bp, &params);
2028 else
2029 return bnx2x_vfpf_config_rss(bp, &params);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002030}
2031
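/* Send the HW_INIT function-state ramrod for the given load phase and
 * wait for its completion.
 */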
Eric Dumazet1191cb82012-04-27 21:39:21 +00002032static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002033{
Yuval Mintz3b603062012-03-18 10:33:39 +00002034 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002035
2036 /* Prepare parameters for function state transitions */
2037 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2038
2039 func_params.f_obj = &bp->func_obj;
2040 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2041
2042 func_params.params.hw_init.load_phase = load_code;
2043
2044 return bnx2x_func_state_change(bp, &func_params);
2045}
2046
2047/*
 2048 * Cleans the objects that have internal lists without sending
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002049 * ramrods. Should be run when interrupts are disabled.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002050 */
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002051void bnx2x_squeeze_objects(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002052{
2053 int rc;
2054 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00002055 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Barak Witkowski15192a82012-06-19 07:48:28 +00002056 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002057
2058 /***************** Cleanup MACs' object first *************************/
2059
 2060	/* Wait for completion of the requested commands */
2061 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2062 /* Perform a dry cleanup */
2063 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2064
2065 /* Clean ETH primary MAC */
2066 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
Barak Witkowski15192a82012-06-19 07:48:28 +00002067 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002068 &ramrod_flags);
2069 if (rc != 0)
2070 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2071
2072 /* Cleanup UC list */
2073 vlan_mac_flags = 0;
2074 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2075 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2076 &ramrod_flags);
2077 if (rc != 0)
2078 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2079
2080 /***************** Now clean mcast object *****************************/
2081 rparam.mcast_obj = &bp->mcast_obj;
2082 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2083
Yuval Mintz8b09be52013-08-01 17:30:59 +03002084 /* Add a DEL command... - Since we're doing a driver cleanup only,
2085 * we take a lock surrounding both the initial send and the CONTs,
2086 * as we don't want a true completion to disrupt us in the middle.
2087 */
2088 netif_addr_lock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002089 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2090 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00002091 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2092 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002093
2094 /* ...and wait until all pending commands are cleared */
2095 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2096 while (rc != 0) {
2097 if (rc < 0) {
2098 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2099 rc);
Yuval Mintz8b09be52013-08-01 17:30:59 +03002100 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002101 return;
2102 }
2103
2104 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2105 }
Yuval Mintz8b09be52013-08-01 17:30:59 +03002106 netif_addr_unlock_bh(bp->dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002107}
2108
2109#ifndef BNX2X_STOP_ON_ERROR
2110#define LOAD_ERROR_EXIT(bp, label) \
2111 do { \
2112 (bp)->state = BNX2X_STATE_ERROR; \
2113 goto label; \
2114 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002115
2116#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2117 do { \
2118 bp->cnic_loaded = false; \
2119 goto label; \
2120 } while (0)
2121#else /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002122#define LOAD_ERROR_EXIT(bp, label) \
2123 do { \
2124 (bp)->state = BNX2X_STATE_ERROR; \
2125 (bp)->panic = 1; \
2126 return -EBUSY; \
2127 } while (0)
Merav Sicron55c11942012-11-07 00:45:48 +00002128#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2129 do { \
2130 bp->cnic_loaded = false; \
2131 (bp)->panic = 1; \
2132 return -EBUSY; \
2133 } while (0)
2134#endif /*BNX2X_STOP_ON_ERROR*/
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002135
Ariel Eliorad5afc82013-01-01 05:22:26 +00002136static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
Yuval Mintz452427b2012-03-26 20:47:07 +00002137{
Ariel Eliorad5afc82013-01-01 05:22:26 +00002138 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2139 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2140 return;
2141}
Yuval Mintz452427b2012-03-26 20:47:07 +00002142
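/* Size and allocate one DMA block that holds both the FW statistics
 * request and the statistics data.  The request side is a header plus
 * enough stats_query_cmd_group entries for all queries, i.e. effectively
 * num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
 *                           STATS_QUERY_CMD_COUNT),
 * which restates the open-coded computation below.
 */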
Ariel Eliorad5afc82013-01-01 05:22:26 +00002143static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2144{
Ariel Elior8db573b2013-01-01 05:22:37 +00002145 int num_groups, vf_headroom = 0;
Ariel Eliorad5afc82013-01-01 05:22:26 +00002146 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002147
Ariel Eliorad5afc82013-01-01 05:22:26 +00002148 /* number of queues for statistics is number of eth queues + FCoE */
2149 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
Yuval Mintz452427b2012-03-26 20:47:07 +00002150
Ariel Eliorad5afc82013-01-01 05:22:26 +00002151 /* Total number of FW statistics requests =
2152 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2153 * and fcoe l2 queue) stats + num of queues (which includes another 1
2154 * for fcoe l2 queue if applicable)
2155 */
2156 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2157
Ariel Elior8db573b2013-01-01 05:22:37 +00002158 /* vf stats appear in the request list, but their data is allocated by
2159 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2160 * it is used to determine where to place the vf stats queries in the
2161 * request struct
2162 */
2163 if (IS_SRIOV(bp))
Ariel Elior64112802013-01-07 00:50:23 +00002164 vf_headroom = bnx2x_vf_headroom(bp);
Ariel Elior8db573b2013-01-01 05:22:37 +00002165
Ariel Eliorad5afc82013-01-01 05:22:26 +00002166 /* Request is built from stats_query_header and an array of
2167 * stats_query_cmd_group each of which contains
2168 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2169 * configured in the stats_query_header.
2170 */
2171 num_groups =
Ariel Elior8db573b2013-01-01 05:22:37 +00002172 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2173 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
Ariel Eliorad5afc82013-01-01 05:22:26 +00002174 1 : 0));
2175
Ariel Elior8db573b2013-01-01 05:22:37 +00002176 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2177 bp->fw_stats_num, vf_headroom, num_groups);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002178 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2179 num_groups * sizeof(struct stats_query_cmd_group);
2180
2181 /* Data for statistics requests + stats_counter
2182 * stats_counter holds per-STORM counters that are incremented
2183 * when STORM has finished with the current request.
 2184	 * memory for FCoE offloaded statistics is counted anyway,
2185 * even if they will not be sent.
2186 * VF stats are not accounted for here as the data of VF stats is stored
2187 * in memory allocated by the VF, not here.
2188 */
2189 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2190 sizeof(struct per_pf_stats) +
2191 sizeof(struct fcoe_statistics_params) +
2192 sizeof(struct per_queue_stats) * num_queue_stats +
2193 sizeof(struct stats_counter);
2194
2195 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2196 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2197
2198 /* Set shortcuts */
2199 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2200 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2201 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2202 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2203 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2204 bp->fw_stats_req_sz;
2205
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002206 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002207 U64_HI(bp->fw_stats_req_mapping),
2208 U64_LO(bp->fw_stats_req_mapping));
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002209 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002210 U64_HI(bp->fw_stats_data_mapping),
2211 U64_LO(bp->fw_stats_data_mapping));
2212 return 0;
2213
2214alloc_mem_err:
2215 bnx2x_free_fw_stats_mem(bp);
2216 BNX2X_ERR("Can't allocate FW stats memory\n");
2217 return -ENOMEM;
2218}
2219
2220/* send load request to mcp and analyze response */
2221static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2222{
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002223 u32 param;
2224
Ariel Eliorad5afc82013-01-01 05:22:26 +00002225 /* init fw_seq */
2226 bp->fw_seq =
2227 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2228 DRV_MSG_SEQ_NUMBER_MASK);
2229 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2230
2231 /* Get current FW pulse sequence */
2232 bp->fw_drv_pulse_wr_seq =
2233 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2234 DRV_PULSE_SEQ_MASK);
2235 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2236
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002237 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2238
2239 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2240 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2241
Ariel Eliorad5afc82013-01-01 05:22:26 +00002242 /* load request */
Dmitry Kravkov178135c2013-05-22 21:21:50 +00002243 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002244
2245 /* if mcp fails to respond we must abort */
2246 if (!(*load_code)) {
2247 BNX2X_ERR("MCP response failure, aborting\n");
2248 return -EBUSY;
Yuval Mintz452427b2012-03-26 20:47:07 +00002249 }
2250
Ariel Eliorad5afc82013-01-01 05:22:26 +00002251 /* If mcp refused (e.g. other port is in diagnostic mode) we
2252 * must abort
2253 */
2254 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2255 BNX2X_ERR("MCP refused load request, aborting\n");
2256 return -EBUSY;
2257 }
2258 return 0;
2259}
2260
 2261/* check whether another PF has already loaded FW to the chip. In
 2262 * virtualized environments a PF from another VM may have already
 2263 * initialized the device, including loading the FW.
2264 */
2265int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2266{
2267 /* is another pf loaded on this engine? */
2268 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2269 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2270 /* build my FW version dword */
2271 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2272 (BCM_5710_FW_MINOR_VERSION << 8) +
2273 (BCM_5710_FW_REVISION_VERSION << 16) +
2274 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2275
2276 /* read loaded FW from chip */
2277 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2278
2279 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2280 loaded_fw, my_fw);
2281
2282 /* abort nic load if version mismatch */
2283 if (my_fw != loaded_fw) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00002284 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
Ariel Eliorad5afc82013-01-01 05:22:26 +00002285 loaded_fw, my_fw);
2286 return -EBUSY;
2287 }
2288 }
2289 return 0;
2290}
2291
2292/* returns the "mcp load_code" according to global load_count array */
2293static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2294{
2295 int path = BP_PATH(bp);
2296
2297 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2298 path, load_count[path][0], load_count[path][1],
2299 load_count[path][2]);
2300 load_count[path][0]++;
2301 load_count[path][1 + port]++;
2302 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2303 path, load_count[path][0], load_count[path][1],
2304 load_count[path][2]);
2305 if (load_count[path][0] == 1)
2306 return FW_MSG_CODE_DRV_LOAD_COMMON;
2307 else if (load_count[path][1 + port] == 1)
2308 return FW_MSG_CODE_DRV_LOAD_PORT;
2309 else
2310 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2311}
2312
2313/* mark PMF if applicable */
2314static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2315{
2316 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2317 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2318 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2319 bp->port.pmf = 1;
2320 /* We need the barrier to ensure the ordering between the
2321 * writing to bp->port.pmf here and reading it from the
2322 * bnx2x_periodic_task().
2323 */
2324 smp_mb();
2325 } else {
2326 bp->port.pmf = 0;
2327 }
2328
2329 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2330}
2331
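/* Advertise DCC and AFEX support in shmem2 on the COMMON load phases and
 * invalidate the AFEX default VLAN tag.
 */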
2332static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2333{
2334 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2335 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2336 (bp->common.shmem2_base)) {
2337 if (SHMEM2_HAS(bp, dcc_support))
2338 SHMEM2_WR(bp, dcc_support,
2339 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2340 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2341 if (SHMEM2_HAS(bp, afex_driver_support))
2342 SHMEM2_WR(bp, afex_driver_support,
2343 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2344 }
2345
2346 /* Set AFEX default VLAN tag to an invalid value */
2347 bp->afex_def_vlan_tag = -1;
Yuval Mintz452427b2012-03-26 20:47:07 +00002348}
2349
Eric Dumazet1191cb82012-04-27 21:39:21 +00002350/**
2351 * bnx2x_bz_fp - zero content of the fastpath structure.
2352 *
2353 * @bp: driver handle
2354 * @index: fastpath index to be zeroed
2355 *
 2356 * Makes sure the contents of bp->fp[index].napi are kept
 2357 * intact.
2358 */
2359static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2360{
2361 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00002362 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002363 struct napi_struct orig_napi = fp->napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002364 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
Yuval Mintzd76a6112013-06-02 00:06:17 +00002365
Eric Dumazet1191cb82012-04-27 21:39:21 +00002366 /* bzero bnx2x_fastpath contents */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00002367 if (fp->tpa_info)
2368 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2369 sizeof(struct bnx2x_agg_info));
2370 memset(fp, 0, sizeof(*fp));
Eric Dumazet1191cb82012-04-27 21:39:21 +00002371
2372 /* Restore the NAPI object as it has been already initialized */
2373 fp->napi = orig_napi;
Barak Witkowski15192a82012-06-19 07:48:28 +00002374 fp->tpa_info = orig_tpa_info;
Eric Dumazet1191cb82012-04-27 21:39:21 +00002375 fp->bp = bp;
2376 fp->index = index;
2377 if (IS_ETH_FP(fp))
2378 fp->max_cos = bp->max_cos;
2379 else
2380 /* Special queues support only one CoS */
2381 fp->max_cos = 1;
2382
Merav Sicron65565882012-06-19 07:48:26 +00002383 /* Init txdata pointers */
Merav Sicron65565882012-06-19 07:48:26 +00002384 if (IS_FCOE_FP(fp))
2385 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
Merav Sicron65565882012-06-19 07:48:26 +00002386 if (IS_ETH_FP(fp))
2387 for_each_cos_in_tx_queue(fp, cos)
2388 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2389 BNX2X_NUM_ETH_QUEUES(bp) + index];
2390
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002391	/* set the tpa flag for each queue. The tpa flag determines the minimal
Eric Dumazet1191cb82012-04-27 21:39:21 +00002392	 * queue size, so it must be set prior to queue memory allocation
2393 */
2394 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2395 (bp->flags & GRO_ENABLE_FLAG &&
2396 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2397 if (bp->flags & TPA_ENABLE_FLAG)
2398 fp->mode = TPA_MODE_LRO;
2399 else if (bp->flags & GRO_ENABLE_FLAG)
2400 fp->mode = TPA_MODE_GRO;
2401
Eric Dumazet1191cb82012-04-27 21:39:21 +00002402 /* We don't want TPA on an FCoE L2 ring */
2403 if (IS_FCOE_FP(fp))
2404 fp->disable_tpa = 1;
Merav Sicron55c11942012-11-07 00:45:48 +00002405}
2406
2407int bnx2x_load_cnic(struct bnx2x *bp)
2408{
2409 int i, rc, port = BP_PORT(bp);
2410
2411 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2412
2413 mutex_init(&bp->cnic_mutex);
2414
Ariel Eliorad5afc82013-01-01 05:22:26 +00002415 if (IS_PF(bp)) {
2416 rc = bnx2x_alloc_mem_cnic(bp);
2417 if (rc) {
2418 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2419 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2420 }
Merav Sicron55c11942012-11-07 00:45:48 +00002421 }
2422
2423 rc = bnx2x_alloc_fp_mem_cnic(bp);
2424 if (rc) {
2425 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2426 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2427 }
2428
2429 /* Update the number of queues with the cnic queues */
2430 rc = bnx2x_set_real_num_queues(bp, 1);
2431 if (rc) {
2432 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2433 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2434 }
2435
2436 /* Add all CNIC NAPI objects */
2437 bnx2x_add_all_napi_cnic(bp);
2438 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2439 bnx2x_napi_enable_cnic(bp);
2440
2441 rc = bnx2x_init_hw_func_cnic(bp);
2442 if (rc)
2443 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2444
2445 bnx2x_nic_init_cnic(bp);
2446
Ariel Eliorad5afc82013-01-01 05:22:26 +00002447 if (IS_PF(bp)) {
2448 /* Enable Timer scan */
2449 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Merav Sicron55c11942012-11-07 00:45:48 +00002450
Ariel Eliorad5afc82013-01-01 05:22:26 +00002451 /* setup cnic queues */
2452 for_each_cnic_queue(bp, i) {
2453 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2454 if (rc) {
2455 BNX2X_ERR("Queue setup failed\n");
2456 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2457 }
Merav Sicron55c11942012-11-07 00:45:48 +00002458 }
2459 }
2460
2461 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002462 bnx2x_set_rx_mode_inner(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002463
2464 /* re-read iscsi info */
2465 bnx2x_get_iscsi_info(bp);
2466 bnx2x_setup_cnic_irq_info(bp);
2467 bnx2x_setup_cnic_info(bp);
2468 bp->cnic_loaded = true;
2469 if (bp->state == BNX2X_STATE_OPEN)
2470 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2471
Merav Sicron55c11942012-11-07 00:45:48 +00002472 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2473
2474 return 0;
2475
2476#ifndef BNX2X_STOP_ON_ERROR
2477load_error_cnic2:
2478 /* Disable Timer scan */
2479 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2480
2481load_error_cnic1:
2482 bnx2x_napi_disable_cnic(bp);
2483 /* Update the number of queues without the cnic queues */
2484 rc = bnx2x_set_real_num_queues(bp, 0);
2485 if (rc)
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2487load_error_cnic0:
2488 BNX2X_ERR("CNIC-related load failed\n");
2489 bnx2x_free_fp_mem_cnic(bp);
2490 bnx2x_free_mem_cnic(bp);
2491 return rc;
2492#endif /* ! BNX2X_STOP_ON_ERROR */
Eric Dumazet1191cb82012-04-27 21:39:21 +00002493}
2494
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002495/* must be called with rtnl_lock */
2496int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2497{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002498 int port = BP_PORT(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002499 int i, rc = 0, load_code = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002500
Merav Sicron55c11942012-11-07 00:45:48 +00002501 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2502 DP(NETIF_MSG_IFUP,
2503 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2504
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002505#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00002506 if (unlikely(bp->panic)) {
2507 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002508 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00002509 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002510#endif
2511
2512 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2513
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002514 /* zero the structure w/o any lock, before SP handler is initialized */
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002515 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2516 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2517 &bp->last_reported_link.link_report_flags);
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00002518
Ariel Eliorad5afc82013-01-01 05:22:26 +00002519 if (IS_PF(bp))
2520 /* must be called before memory allocation and HW init */
2521 bnx2x_ilt_set_info(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002522
Ariel Elior6383c0b2011-07-14 08:31:57 +00002523 /*
2524 * Zero fastpath structures preserving invariants like napi, which are
2525 * allocated only once, fp index, max_cos, bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00002526 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002527 */
Merav Sicron51c1a582012-03-18 10:33:38 +00002528 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002529 for_each_queue(bp, i)
2530 bnx2x_bz_fp(bp, i);
Merav Sicron55c11942012-11-07 00:45:48 +00002531 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2532 bp->num_cnic_queues) *
2533 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002534
Merav Sicron55c11942012-11-07 00:45:48 +00002535 bp->fcoe_init = false;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002536
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08002537 /* Set the receive queues buffer size */
2538 bnx2x_set_rx_buf_size(bp);
2539
Ariel Eliorad5afc82013-01-01 05:22:26 +00002540 if (IS_PF(bp)) {
2541 rc = bnx2x_alloc_mem(bp);
2542 if (rc) {
2543 BNX2X_ERR("Unable to allocate bp memory\n");
2544 return rc;
2545 }
2546 }
2547
2548 /* Allocated memory for FW statistics */
2549 if (bnx2x_alloc_fw_stats_mem(bp))
2550 LOAD_ERROR_EXIT(bp, load_error0);
2551
 2552	/* needs to be done after alloc mem, since it's self-adjusting to the
 2553	 * amount of memory available for RSS queues
2554 */
2555 rc = bnx2x_alloc_fp_mem(bp);
2556 if (rc) {
2557 BNX2X_ERR("Unable to allocate memory for fps\n");
2558 LOAD_ERROR_EXIT(bp, load_error0);
2559 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002560
Ariel Elior8d9ac292013-01-01 05:22:27 +00002561 /* request pf to initialize status blocks */
2562 if (IS_VF(bp)) {
2563 rc = bnx2x_vfpf_init(bp);
2564 if (rc)
2565 LOAD_ERROR_EXIT(bp, load_error0);
2566 }
2567
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002568 /* As long as bnx2x_alloc_mem() may possibly update
2569 * bp->num_queues, bnx2x_set_real_num_queues() should always
Merav Sicron55c11942012-11-07 00:45:48 +00002570 * come after it. At this stage cnic queues are not counted.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002571 */
Merav Sicron55c11942012-11-07 00:45:48 +00002572 rc = bnx2x_set_real_num_queues(bp, 0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002573 if (rc) {
2574 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002575 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002576 }
2577
Ariel Elior6383c0b2011-07-14 08:31:57 +00002578 /* configure multi cos mappings in kernel.
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002579 * this configuration may be overridden by a multi class queue
2580 * discipline or by a dcbx negotiation result.
Ariel Elior6383c0b2011-07-14 08:31:57 +00002581 */
2582 bnx2x_setup_tc(bp->dev, bp->max_cos);
2583
Merav Sicron26614ba2012-08-27 03:26:19 +00002584 /* Add all NAPI objects */
2585 bnx2x_add_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002586 DP(NETIF_MSG_IFUP, "napi added\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002587 bnx2x_napi_enable(bp);
2588
Ariel Eliorad5afc82013-01-01 05:22:26 +00002589 if (IS_PF(bp)) {
2590 /* set pf load just before approaching the MCP */
2591 bnx2x_set_pf_load(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002592
Ariel Eliorad5afc82013-01-01 05:22:26 +00002593 /* if mcp exists send load request and analyze response */
2594 if (!BP_NOMCP(bp)) {
2595 /* attempt to load pf */
2596 rc = bnx2x_nic_load_request(bp, &load_code);
2597 if (rc)
2598 LOAD_ERROR_EXIT(bp, load_error1);
Ariel Elior95c6c6162012-01-26 06:01:52 +00002599
Ariel Eliorad5afc82013-01-01 05:22:26 +00002600 /* what did mcp say? */
2601 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2602 if (rc) {
2603 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002604 LOAD_ERROR_EXIT(bp, load_error2);
2605 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002606 } else {
2607 load_code = bnx2x_nic_load_no_mcp(bp, port);
Ariel Eliord1e2d962012-01-26 06:01:49 +00002608 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002609
Ariel Eliorad5afc82013-01-01 05:22:26 +00002610 /* mark pmf if applicable */
2611 bnx2x_nic_load_pmf(bp, load_code);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002612
Ariel Eliorad5afc82013-01-01 05:22:26 +00002613 /* Init Function state controlling object */
2614 bnx2x__init_func_obj(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002615
Ariel Eliorad5afc82013-01-01 05:22:26 +00002616 /* Initialize HW */
2617 rc = bnx2x_init_hw(bp, load_code);
2618 if (rc) {
2619 BNX2X_ERR("HW init failed, aborting\n");
2620 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2621 LOAD_ERROR_EXIT(bp, load_error2);
2622 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002623 }
2624
Yuval Mintzecf01c22013-04-22 02:53:03 +00002625 bnx2x_pre_irq_nic_init(bp);
2626
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002627 /* Connect to IRQs */
2628 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002629 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002630 BNX2X_ERR("setup irqs failed\n");
2631 if (IS_PF(bp))
2632 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002633 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002634 }
2635
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002636 /* Init per-function objects */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002637 if (IS_PF(bp)) {
Yuval Mintzecf01c22013-04-22 02:53:03 +00002638 /* Setup NIC internals and enable interrupts */
2639 bnx2x_post_irq_nic_init(bp, load_code);
2640
Ariel Eliorad5afc82013-01-01 05:22:26 +00002641 bnx2x_init_bp_objs(bp);
Ariel Eliorb56e9672013-01-01 05:22:32 +00002642 bnx2x_iov_nic_init(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002643
Ariel Eliorad5afc82013-01-01 05:22:26 +00002644 /* Set AFEX default VLAN tag to an invalid value */
2645 bp->afex_def_vlan_tag = -1;
2646 bnx2x_nic_load_afex_dcc(bp, load_code);
2647 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2648 rc = bnx2x_func_start(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002649 if (rc) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002650 BNX2X_ERR("Function start failed!\n");
2651 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2652
Merav Sicron55c11942012-11-07 00:45:48 +00002653 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002654 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002655
Ariel Eliorad5afc82013-01-01 05:22:26 +00002656 /* Send LOAD_DONE command to MCP */
2657 if (!BP_NOMCP(bp)) {
2658 load_code = bnx2x_fw_command(bp,
2659 DRV_MSG_CODE_LOAD_DONE, 0);
2660 if (!load_code) {
2661 BNX2X_ERR("MCP response failure, aborting\n");
2662 rc = -EBUSY;
2663 LOAD_ERROR_EXIT(bp, load_error3);
2664 }
2665 }
2666
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002667 /* initialize FW coalescing state machines in RAM */
2668 bnx2x_update_coalesce(bp);
Ariel Elior60cad4e2013-09-04 14:09:22 +03002669 }
Ariel Elior0c14e5c2013-04-17 22:49:06 +00002670
Ariel Elior60cad4e2013-09-04 14:09:22 +03002671 /* setup the leading queue */
2672 rc = bnx2x_setup_leading(bp);
2673 if (rc) {
2674 BNX2X_ERR("Setup leading failed!\n");
2675 LOAD_ERROR_EXIT(bp, load_error3);
2676 }
2677
2678 /* set up the rest of the queues */
2679 for_each_nondefault_eth_queue(bp, i) {
2680 if (IS_PF(bp))
2681 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2682 else /* VF */
2683 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002684 if (rc) {
Ariel Elior60cad4e2013-09-04 14:09:22 +03002685 BNX2X_ERR("Queue %d setup failed\n", i);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002686 LOAD_ERROR_EXIT(bp, load_error3);
2687 }
Ariel Elior60cad4e2013-09-04 14:09:22 +03002688 }
Ariel Eliorad5afc82013-01-01 05:22:26 +00002689
Ariel Elior60cad4e2013-09-04 14:09:22 +03002690 /* setup rss */
2691 rc = bnx2x_init_rss(bp);
2692 if (rc) {
2693 BNX2X_ERR("PF RSS init failed\n");
2694 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002695 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002696
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002697	/* Now that clients are configured we are ready to work */
2698 bp->state = BNX2X_STATE_OPEN;
2699
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002700 /* Configure a ucast MAC */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002701 if (IS_PF(bp))
2702 rc = bnx2x_set_eth_mac(bp, true);
Ariel Elior8d9ac292013-01-01 05:22:27 +00002703 else /* vf */
Dmitry Kravkovf8f4f612013-04-24 01:45:00 +00002704 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2705 true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002706 if (rc) {
2707 BNX2X_ERR("Setting Ethernet MAC failed\n");
Merav Sicron55c11942012-11-07 00:45:48 +00002708 LOAD_ERROR_EXIT(bp, load_error3);
Merav Sicron51c1a582012-03-18 10:33:38 +00002709 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002710
Ariel Eliorad5afc82013-01-01 05:22:26 +00002711 if (IS_PF(bp) && bp->pending_max) {
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002712 bnx2x_update_max_mf_config(bp, bp->pending_max);
2713 bp->pending_max = 0;
2714 }
2715
Ariel Eliorad5afc82013-01-01 05:22:26 +00002716 if (bp->port.pmf) {
2717 rc = bnx2x_initial_phy_init(bp, load_mode);
2718 if (rc)
2719 LOAD_ERROR_EXIT(bp, load_error3);
2720 }
Barak Witkowskic63da992012-12-05 23:04:03 +00002721 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002722
2723 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002724
2725 /* Initialize Rx filter. */
Yuval Mintz8b09be52013-08-01 17:30:59 +03002726 bnx2x_set_rx_mode_inner(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002727
2728 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002729 switch (load_mode) {
2730 case LOAD_NORMAL:
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002731 /* Tx queue should be only re-enabled */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002732 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002733 break;
2734
2735 case LOAD_OPEN:
2736 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002737 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002738 break;
2739
2740 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002741 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002742 bp->state = BNX2X_STATE_DIAG;
2743 break;
2744
2745 default:
2746 break;
2747 }
2748
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002749 if (bp->port.pmf)
Barak Witkowski4c704892012-12-02 04:05:47 +00002750 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002751 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002752 bnx2x__link_status_update(bp);
2753
2754 /* start the timer */
2755 mod_timer(&bp->timer, jiffies + bp->current_interval);
2756
Merav Sicron55c11942012-11-07 00:45:48 +00002757 if (CNIC_ENABLED(bp))
2758 bnx2x_load_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002759
Ariel Eliorad5afc82013-01-01 05:22:26 +00002760 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2761 /* mark driver is loaded in shmem2 */
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002762 u32 val;
2763 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2764 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2765 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2766 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2767 }
2768
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002769 /* Wait for all pending SP commands to complete */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002770 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002771 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
Yuval Mintz5d07d862012-09-13 02:56:21 +00002772 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002773 return -EBUSY;
2774 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002775
Barak Witkowski98768792012-06-19 07:48:31 +00002776 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2777 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2778 bnx2x_dcbx_init(bp, false);
2779
Merav Sicron55c11942012-11-07 00:45:48 +00002780 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2781
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002782 return 0;
2783
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002784#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002785load_error3:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002786 if (IS_PF(bp)) {
2787 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002788
Ariel Eliorad5afc82013-01-01 05:22:26 +00002789 /* Clean queueable objects */
2790 bnx2x_squeeze_objects(bp);
2791 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002792
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002793 /* Free SKBs, SGEs, TPA pool and driver internals */
2794 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002795 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002796 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002797
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002798 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002799 bnx2x_free_irq(bp);
2800load_error2:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002801 if (IS_PF(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002802 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2803 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2804 }
2805
2806 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002807load_error1:
2808 bnx2x_napi_disable(bp);
Michal Schmidt722c6f52013-03-15 05:27:54 +00002809 bnx2x_del_all_napi(bp);
Ariel Eliorad5afc82013-01-01 05:22:26 +00002810
Ariel Elior889b9af2012-01-26 06:01:51 +00002811 /* clear pf_load status, as it was already set */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002812 if (IS_PF(bp))
2813 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002814load_error0:
Ariel Eliorad5afc82013-01-01 05:22:26 +00002815 bnx2x_free_fp_mem(bp);
2816 bnx2x_free_fw_stats_mem(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002817 bnx2x_free_mem(bp);
2818
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002819 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002820#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002821}
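/* Rough usage sketch (illustrative only, not part of this file's code paths):
 * the ndo_open handler is expected to end up in bnx2x_nic_load() with
 * LOAD_OPEN, e.g.
 *
 *	static int open_sketch(struct net_device *dev)	// hypothetical name
 *	{
 *		struct bnx2x *bp = netdev_priv(dev);
 *
 *		return bnx2x_nic_load(bp, LOAD_OPEN);
 *	}
 *
 * while diagnostic paths reload with LOAD_DIAG/LOAD_LOOPBACK_EXT, which is
 * why the Tx-start switch above keys off load_mode.
 */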
2822
Yuval Mintz7fa6f3402013-03-20 05:21:28 +00002823int bnx2x_drain_tx_queues(struct bnx2x *bp)
Ariel Eliorad5afc82013-01-01 05:22:26 +00002824{
2825	int rc = 0, cos, i;
2826
2827 /* Wait until tx fastpath tasks complete */
2828 for_each_tx_queue(bp, i) {
2829 struct bnx2x_fastpath *fp = &bp->fp[i];
2830
2831 for_each_cos_in_tx_queue(fp, cos)
2832 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2833 if (rc)
2834 return rc;
2835 }
2836 return 0;
2837}
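/* A non-zero return simply propagates the first non-zero status from
 * bnx2x_clean_tx_queue(); the unload path below calls this for its side
 * effect only and ignores the result.
 */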
2838
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002839/* must be called with rtnl_lock */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002840int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002841{
2842 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002843 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002844
Merav Sicron55c11942012-11-07 00:45:48 +00002845 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2846
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002847 /* mark driver is unloaded in shmem2 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002848 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002849 u32 val;
2850 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2851 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2852 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2853 }
2854
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002855 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
Ariel Eliorad5afc82013-01-01 05:22:26 +00002856 (bp->state == BNX2X_STATE_CLOSED ||
2857 bp->state == BNX2X_STATE_ERROR)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002858 /* We can get here if the driver has been unloaded
2859 * during parity error recovery and is either waiting for a
2860 * leader to complete or for other functions to unload and
2861 * then ifdown has been issued. In this case we want to
2862	 * unload and let other functions complete a recovery
2863 * process.
2864 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002865 bp->recovery_state = BNX2X_RECOVERY_DONE;
2866 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002867 bnx2x_release_leader_lock(bp);
2868 smp_mb();
2869
Merav Sicron51c1a582012-03-18 10:33:38 +00002870 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2871 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002872 return -EINVAL;
2873 }
2874
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002875 /* Nothing to do during unload if previous bnx2x_nic_load()
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002876	 * has not completed successfully - all resources are released.
Yuval Mintz80bfe5c2013-01-23 03:21:49 +00002877 *
2878	 * We can get here only after an unsuccessful ndo_* callback, during which
2879 * dev->IFF_UP flag is still on.
2880 */
2881 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2882 return 0;
2883
2884	/* It's important to set bp->state to a value different from
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002885 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2886 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2887 */
2888 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2889 smp_mb();
2890
Ariel Elior78c3bcc2013-06-20 17:39:08 +03002891 /* indicate to VFs that the PF is going down */
2892 bnx2x_iov_channel_down(bp);
2893
Merav Sicron55c11942012-11-07 00:45:48 +00002894 if (CNIC_LOADED(bp))
2895 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2896
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002897 /* Stop Tx */
2898 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002899 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002900
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002901 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002902
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002903 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002904
Ariel Eliorad5afc82013-01-01 05:22:26 +00002905 if (IS_PF(bp)) {
2906 /* Set ALWAYS_ALIVE bit in shmem */
2907 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2908 bnx2x_drv_pulse(bp);
2909 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2910 bnx2x_save_statistics(bp);
2911 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002912
Ariel Eliorad5afc82013-01-01 05:22:26 +00002913 /* wait till consumers catch up with producers in all queues */
2914 bnx2x_drain_tx_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002915
Ariel Elior9b176b62013-01-01 05:22:28 +00002916	/* if VF, indicate to the PF that this function is going down
2917	 * (the PF will delete sp elements and clear initializations)
2918	 */
2919 if (IS_VF(bp))
2920 bnx2x_vfpf_close_vf(bp);
2921 else if (unload_mode != UNLOAD_RECOVERY)
2922 /* if this is a normal/close unload need to clean up chip*/
Yuval Mintz5d07d862012-09-13 02:56:21 +00002923 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002924 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002925 /* Send the UNLOAD_REQUEST to the MCP */
2926 bnx2x_send_unload_req(bp, unload_mode);
2927
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002928 /* Prevent transactions to host from the functions on the
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002929 * engine that doesn't reset global blocks in case of global
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002930 * attention once global blocks are reset and gates are opened
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002931	 * (the engine whose leader will perform the recovery
2932 * last).
2933 */
2934 if (!CHIP_IS_E1x(bp))
2935 bnx2x_pf_disable(bp);
2936
2937 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002938 bnx2x_netif_stop(bp, 1);
Merav Sicron26614ba2012-08-27 03:26:19 +00002939 /* Delete all NAPI objects */
2940 bnx2x_del_all_napi(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002941 if (CNIC_LOADED(bp))
2942 bnx2x_del_all_napi_cnic(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002943 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002944 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002945
2946 /* Report UNLOAD_DONE to MCP */
Yuval Mintz5d07d862012-09-13 02:56:21 +00002947 bnx2x_send_unload_done(bp, false);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002948 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002949
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002950 /*
Yuval Mintz16a5fd92013-06-02 00:06:18 +00002951 * At this stage no more interrupts will arrive so we may safely clean
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002952 * the queueable objects here in case they failed to get cleaned so far.
2953 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002954 if (IS_PF(bp))
2955 bnx2x_squeeze_objects(bp);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002956
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002957 /* There should be no more pending SP commands at this stage */
2958 bp->sp_state = 0;
2959
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002960 bp->port.pmf = 0;
2961
2962 /* Free SKBs, SGEs, TPA pool and driver internals */
2963 bnx2x_free_skbs(bp);
Merav Sicron55c11942012-11-07 00:45:48 +00002964 if (CNIC_LOADED(bp))
2965 bnx2x_free_skbs_cnic(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002966 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002967 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002968
Ariel Eliorad5afc82013-01-01 05:22:26 +00002969 bnx2x_free_fp_mem(bp);
2970 if (CNIC_LOADED(bp))
Merav Sicron55c11942012-11-07 00:45:48 +00002971 bnx2x_free_fp_mem_cnic(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002972
Ariel Eliorad5afc82013-01-01 05:22:26 +00002973 if (IS_PF(bp)) {
Ariel Eliorad5afc82013-01-01 05:22:26 +00002974 if (CNIC_LOADED(bp))
2975 bnx2x_free_mem_cnic(bp);
2976 }
Ariel Eliorb4cddbd2013-08-28 01:13:03 +03002977 bnx2x_free_mem(bp);
2978
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002979 bp->state = BNX2X_STATE_CLOSED;
Merav Sicron55c11942012-11-07 00:45:48 +00002980 bp->cnic_loaded = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002981
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002982 /* Check if there are pending parity attentions. If there are - set
2983 * RECOVERY_IN_PROGRESS.
2984 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002985 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002986 bnx2x_set_reset_in_progress(bp);
2987
2988 /* Set RESET_IS_GLOBAL if needed */
2989 if (global)
2990 bnx2x_set_reset_global(bp);
2991 }
2992
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002993 /* The last driver must disable a "close the gate" if there is no
2994 * parity attention or "process kill" pending.
2995 */
Ariel Eliorad5afc82013-01-01 05:22:26 +00002996 if (IS_PF(bp) &&
2997 !bnx2x_clear_pf_load(bp) &&
2998 bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002999 bnx2x_disable_close_the_gate(bp);
3000
Merav Sicron55c11942012-11-07 00:45:48 +00003001 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3002
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003003 return 0;
3004}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003005
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003006int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3007{
3008 u16 pmcsr;
3009
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003010 /* If there is no power capability, silently succeed */
3011 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003012 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00003013 return 0;
3014 }
3015
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003016 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3017
3018 switch (state) {
3019 case PCI_D0:
3020 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3021 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3022 PCI_PM_CTRL_PME_STATUS));
3023
3024 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3025 /* delay required during transition out of D3hot */
3026 msleep(20);
3027 break;
3028
3029 case PCI_D3hot:
3030	 /* If there are other clients above, don't
3031 shut down the power */
3032 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3033 return 0;
3034 /* Don't shut down the power for emulation and FPGA */
3035 if (CHIP_REV_IS_SLOW(bp))
3036 return 0;
3037
3038 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3039 pmcsr |= 3;
3040
3041 if (bp->wol)
3042 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3043
3044 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3045 pmcsr);
3046
3047 /* No more memory access after this point until
3048 * device is brought back to D0.
3049 */
3050 break;
3051
3052 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003053 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003054 return -EINVAL;
3055 }
3056 return 0;
3057}
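/* Note: PCI_PM_CTRL_STATE_MASK is 0x3, so the "pmcsr |= 3" above encodes
 * D3hot; callers are expected to bring the device back to D0 (and thereby
 * restore register access) before the next bnx2x_nic_load().
 */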
3058
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003059/*
3060 * net_device service functions
3061 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003062int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003063{
3064 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003065 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003066 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3067 napi);
3068 struct bnx2x *bp = fp->bp;
3069
3070 while (1) {
3071#ifdef BNX2X_STOP_ON_ERROR
3072 if (unlikely(bp->panic)) {
3073 napi_complete(napi);
3074 return 0;
3075 }
3076#endif
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003077 if (!bnx2x_fp_lock_napi(fp))
3078 return work_done;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003079
Ariel Elior6383c0b2011-07-14 08:31:57 +00003080 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00003081 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3082 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003083
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003084 if (bnx2x_has_rx_work(fp)) {
3085 work_done += bnx2x_rx_int(fp, budget - work_done);
3086
3087 /* must not complete if we consumed full budget */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003088 if (work_done >= budget) {
3089 bnx2x_fp_unlock_napi(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003090 break;
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003091 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003092 }
3093
3094 /* Fall out from the NAPI loop if needed */
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003095 if (!bnx2x_fp_unlock_napi(fp) &&
3096 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Merav Sicron55c11942012-11-07 00:45:48 +00003097
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003098 /* No need to update SB for FCoE L2 ring as long as
3099 * it's connected to the default SB and the SB
3100 * has been updated when NAPI was scheduled.
3101 */
3102 if (IS_FCOE_FP(fp)) {
3103 napi_complete(napi);
3104 break;
3105 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003106 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003107 /* bnx2x_has_rx_work() reads the status block,
3108 * thus we need to ensure that status block indices
3109 * have been actually read (bnx2x_update_fpsb_idx)
3110 * prior to this check (bnx2x_has_rx_work) so that
3111 * we won't write the "newer" value of the status block
3112 * to IGU (if there was a DMA right after
3113 * bnx2x_has_rx_work and if there is no rmb, the memory
3114 * reading (bnx2x_update_fpsb_idx) may be postponed
3115 * to right before bnx2x_ack_sb). In this case there
3116 * will never be another interrupt until there is
3117 * another update of the status block, while there
3118 * is still unhandled work.
3119 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003120 rmb();
3121
3122 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3123 napi_complete(napi);
3124 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00003125 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003126 "Update index to %d\n", fp->fp_hc_idx);
3127 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3128 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003129 IGU_INT_ENABLE, 1);
3130 break;
3131 }
3132 }
3133 }
3134
3135 return work_done;
3136}
3137
Cong Wange0d10952013-08-01 11:10:25 +08003138#ifdef CONFIG_NET_RX_BUSY_POLL
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003139/* must be called with local_bh_disable()d */
3140int bnx2x_low_latency_recv(struct napi_struct *napi)
3141{
3142 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3143 napi);
3144 struct bnx2x *bp = fp->bp;
3145 int found = 0;
3146
3147 if ((bp->state == BNX2X_STATE_CLOSED) ||
3148 (bp->state == BNX2X_STATE_ERROR) ||
3149 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3150 return LL_FLUSH_FAILED;
3151
3152 if (!bnx2x_fp_lock_poll(fp))
3153 return LL_FLUSH_BUSY;
3154
Dmitry Kravkov75b29452013-06-19 01:36:05 +03003155 if (bnx2x_has_rx_work(fp))
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003156 found = bnx2x_rx_int(fp, 4);
Dmitry Kravkov8f20aa52013-06-19 01:36:04 +03003157
3158 bnx2x_fp_unlock_poll(fp);
3159
3160 return found;
3161}
3162#endif
3163
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003164/* we split the first BD into headers and data BDs
3165 * to ease the pain of our fellow microcode engineers;
3166 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003167 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003168static u16 bnx2x_tx_split(struct bnx2x *bp,
3169 struct bnx2x_fp_txdata *txdata,
3170 struct sw_tx_bd *tx_buf,
3171 struct eth_tx_start_bd **tx_bd, u16 hlen,
3172 u16 bd_prod)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003173{
3174 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3175 struct eth_tx_bd *d_tx_bd;
3176 dma_addr_t mapping;
3177 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3178
3179 /* first fix first BD */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003180 h_tx_bd->nbytes = cpu_to_le16(hlen);
3181
Dmitry Kravkov91226792013-03-11 05:17:52 +00003182 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3183 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003184
3185 /* now get a new data BD
3186 * (after the pbd) and fill it */
3187 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003188 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003189
3190 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3191 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3192
3193 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3194 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3195 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3196
3197 /* this marks the BD as one that has no individual mapping */
3198 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3199
3200 DP(NETIF_MSG_TX_QUEUED,
3201 "TSO split data size is %d (%x:%x)\n",
3202 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3203
3204 /* update tx_bd */
3205 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3206
3207 return bd_prod;
3208}
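/* After the split the caller continues from the returned bd_prod with *tx_bd
 * pointing at the new data BD; only the header BD owns the DMA mapping, which
 * is why the data BD was flagged BNX2X_TSO_SPLIT_BD (nothing extra to unmap).
 */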
3209
Yuval Mintz86564c32013-01-23 03:21:50 +00003210#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3211#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003212static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003213{
Yuval Mintz86564c32013-01-23 03:21:50 +00003214 __sum16 tsum = (__force __sum16) csum;
3215
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003216 if (fix > 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003217 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3218 csum_partial(t_header - fix, fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003219
3220 else if (fix < 0)
Yuval Mintz86564c32013-01-23 03:21:50 +00003221 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3222 csum_partial(t_header, -fix, 0)));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003223
Dmitry Kravkove2593fc2013-02-27 00:04:59 +00003224 return bswab16(tsum);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003225}
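/* Illustrative reading of the fixup above: with fix > 0 the checksum supplied
 * in the skb also covers 'fix' bytes in front of the transport header, so the
 * partial sum of those bytes is subtracted back out; with fix < 0 the missing
 * bytes are added in. The result is folded to 16 bits and byte-swapped into
 * the form the parsing BD expects.
 */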
3226
Dmitry Kravkov91226792013-03-11 05:17:52 +00003227static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003228{
3229 u32 rc;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003230 __u8 prot = 0;
3231 __be16 protocol;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003232
3233 if (skb->ip_summed != CHECKSUM_PARTIAL)
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003234 return XMIT_PLAIN;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003235
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003236 protocol = vlan_get_protocol(skb);
3237 if (protocol == htons(ETH_P_IPV6)) {
3238 rc = XMIT_CSUM_V6;
3239 prot = ipv6_hdr(skb)->nexthdr;
3240 } else {
3241 rc = XMIT_CSUM_V4;
3242 prot = ip_hdr(skb)->protocol;
3243 }
3244
3245 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3246 if (inner_ip_hdr(skb)->version == 6) {
3247 rc |= XMIT_CSUM_ENC_V6;
3248 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003249 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003250 } else {
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003251 rc |= XMIT_CSUM_ENC_V4;
3252 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003253 rc |= XMIT_CSUM_TCP;
3254 }
3255 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003256 if (prot == IPPROTO_TCP)
3257 rc |= XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003258
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003259 if (skb_is_gso_v6(skb)) {
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003260 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003261 if (rc & XMIT_CSUM_ENC)
3262 rc |= XMIT_GSO_ENC_V6;
3263 } else if (skb_is_gso(skb)) {
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003264 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003265 if (rc & XMIT_CSUM_ENC)
3266 rc |= XMIT_GSO_ENC_V4;
3267 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003268
3269 return rc;
3270}
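/* The returned XMIT_* bitmask drives bnx2x_start_xmit(): XMIT_CSUM_* selects
 * the checksum-offload PBD setup, XMIT_GSO_* the LSO setup, and the *_ENC_*
 * variants mark tunnelled packets whose inner headers are described via the
 * second parsing BD on non-E1x devices.
 */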
3271
3272#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3273/* check if the packet requires linearization (packet is too fragmented);
3274   no need to check fragmentation if page size > 8K (there will be no
3275   violation of FW restrictions) */
3276static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3277 u32 xmit_type)
3278{
3279 int to_copy = 0;
3280 int hlen = 0;
3281 int first_bd_sz = 0;
3282
3283 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3284 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3285
3286 if (xmit_type & XMIT_GSO) {
3287 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3288 /* Check if LSO packet needs to be copied:
3289 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3290 int wnd_size = MAX_FETCH_BD - 3;
3291 /* Number of windows to check */
3292 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3293 int wnd_idx = 0;
3294 int frag_idx = 0;
3295 u32 wnd_sum = 0;
3296
3297 /* Headers length */
3298 hlen = (int)(skb_transport_header(skb) - skb->data) +
3299 tcp_hdrlen(skb);
3300
3301 /* Amount of data (w/o headers) on linear part of SKB*/
3302 first_bd_sz = skb_headlen(skb) - hlen;
3303
3304 wnd_sum = first_bd_sz;
3305
3306 /* Calculate the first sum - it's special */
3307 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3308 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003309 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003310
3311 /* If there was data on linear skb data - check it */
3312 if (first_bd_sz > 0) {
3313 if (unlikely(wnd_sum < lso_mss)) {
3314 to_copy = 1;
3315 goto exit_lbl;
3316 }
3317
3318 wnd_sum -= first_bd_sz;
3319 }
3320
3321 /* Others are easier: run through the frag list and
3322 check all windows */
3323 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3324 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003325 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003326
3327 if (unlikely(wnd_sum < lso_mss)) {
3328 to_copy = 1;
3329 break;
3330 }
3331 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00003332 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003333 }
3334 } else {
3335		/* a non-LSO packet that is too fragmented should always
3336 be linearized */
3337 to_copy = 1;
3338 }
3339 }
3340
3341exit_lbl:
3342 if (unlikely(to_copy))
3343 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003344 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003345 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3346 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3347
3348 return to_copy;
3349}
3350#endif
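/* Worked example of the check above (sketch): with wnd_size = MAX_FETCH_BD - 3
 * the firmware can fetch at most that many data BDs per LSO window, so the
 * loop slides a window of wnd_size BDs (linear remainder plus frags) across
 * the skb and requests linearization as soon as any window carries fewer than
 * gso_size bytes, i.e. could not supply one full MSS.
 */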
3351
Dmitry Kravkov91226792013-03-11 05:17:52 +00003352static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3353 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003354{
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003355 struct ipv6hdr *ipv6;
3356
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003357 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3358 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3359 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003360
3361 if (xmit_type & XMIT_GSO_ENC_V6)
3362 ipv6 = inner_ipv6_hdr(skb);
3363 else if (xmit_type & XMIT_GSO_V6)
3364 ipv6 = ipv6_hdr(skb);
3365 else
3366 ipv6 = NULL;
3367
3368 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003369 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003370}
3371
3372/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003373 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003374 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003375 * @skb: packet skb
3376 * @pbd: parse BD
3377 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003378 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003379static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3380 struct eth_tx_parse_bd_e1x *pbd,
Yuval Mintz057cf652013-05-19 04:41:01 +00003381 struct eth_tx_start_bd *tx_start_bd,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003382 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003383{
3384 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
Yuval Mintz86564c32013-01-23 03:21:50 +00003385 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003386 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003387
3388 if (xmit_type & XMIT_GSO_V4) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003389 pbd->ip_id = bswab16(ip_hdr(skb)->id);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003390 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003391 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3392 ip_hdr(skb)->daddr,
3393 0, IPPROTO_TCP, 0));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003394
Yuval Mintz057cf652013-05-19 04:41:01 +00003395 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3396 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3397 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003398 pbd->tcp_pseudo_csum =
Yuval Mintz86564c32013-01-23 03:21:50 +00003399 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3400 &ipv6_hdr(skb)->daddr,
3401 0, IPPROTO_TCP, 0));
Yuval Mintz057cf652013-05-19 04:41:01 +00003402 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003403
Yuval Mintz86564c32013-01-23 03:21:50 +00003404 pbd->global_data |=
3405 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003406}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003407
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003408/**
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003409 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3410 *
3411 * @bp: driver handle
3412 * @skb: packet skb
3413 * @parsing_data: data to be updated
3414 * @xmit_type: xmit flags
3415 *
3416 * 57712/578xx related, when skb has encapsulation
3417 */
3418static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3419 u32 *parsing_data, u32 xmit_type)
3420{
3421 *parsing_data |=
3422 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3423 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3424 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3425
3426 if (xmit_type & XMIT_CSUM_TCP) {
3427 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3428 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3429 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3430
3431 return skb_inner_transport_header(skb) +
3432 inner_tcp_hdrlen(skb) - skb->data;
3433 }
3434
3435 /* We support checksum offload for TCP and UDP only.
3436 * No need to pass the UDP header length - it's a constant.
3437 */
3438 return skb_inner_transport_header(skb) +
3439 sizeof(struct udphdr) - skb->data;
3440}
3441
3442/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003443 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003444 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003445 * @bp: driver handle
3446 * @skb: packet skb
3447 * @parsing_data: data to be updated
3448 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003449 *
Dmitry Kravkov91226792013-03-11 05:17:52 +00003450 * 57712/578xx related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003451 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003452static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3453 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003454{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003455 *parsing_data |=
Yuval Mintz2de67432013-01-23 03:21:43 +00003456 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
Dmitry Kravkov91226792013-03-11 05:17:52 +00003457 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3458 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003459
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003460 if (xmit_type & XMIT_CSUM_TCP) {
3461 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3462 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3463 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003464
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003465 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
Yuval Mintz924d75a2013-01-23 03:21:44 +00003466 }
3467 /* We support checksum offload for TCP and UDP only.
3468 * No need to pass the UDP header length - it's a constant.
3469 */
3470 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003471}
3472
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003473/* set FW indication according to inner or outer protocols if tunneled */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003474static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3475 struct eth_tx_start_bd *tx_start_bd,
3476 u32 xmit_type)
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003477{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003478 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3479
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003480 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
Dmitry Kravkov91226792013-03-11 05:17:52 +00003481 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003482
3483 if (!(xmit_type & XMIT_CSUM_TCP))
3484 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003485}
3486
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003487/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00003488 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003489 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00003490 * @bp: driver handle
3491 * @skb: packet skb
3492 * @pbd: parse BD to be updated
3493 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003494 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003495static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3496 struct eth_tx_parse_bd_e1x *pbd,
3497 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003498{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003499 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003500
3501 /* for now NS flag is not used in Linux */
3502 pbd->global_data =
Yuval Mintz86564c32013-01-23 03:21:50 +00003503 cpu_to_le16(hlen |
3504 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3505 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003506
3507 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003508 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003509
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00003510 hlen += pbd->ip_hlen_w;
3511
3512 /* We support checksum offload for TCP and UDP only */
3513 if (xmit_type & XMIT_CSUM_TCP)
3514 hlen += tcp_hdrlen(skb) / 2;
3515 else
3516 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003517
3518 pbd->total_hlen_w = cpu_to_le16(hlen);
3519 hlen = hlen*2;
3520
3521 if (xmit_type & XMIT_CSUM_TCP) {
Yuval Mintz86564c32013-01-23 03:21:50 +00003522 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003523
3524 } else {
3525 s8 fix = SKB_CS_OFF(skb); /* signed! */
3526
3527 DP(NETIF_MSG_TX_QUEUED,
3528 "hlen %d fix %d csum before fix %x\n",
3529 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3530
3531 /* HW bug: fixup the CSUM */
3532 pbd->tcp_pseudo_csum =
3533 bnx2x_csum_fix(skb_transport_header(skb),
3534 SKB_CS(skb), fix);
3535
3536 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3537 pbd->tcp_pseudo_csum);
3538 }
3539
3540 return hlen;
3541}
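/* Note on units: hlen is accumulated in 16-bit words to fill total_hlen_w,
 * then doubled back to bytes for the caller (e.g. for the TSO header split).
 */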
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003542
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003543static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3544 struct eth_tx_parse_bd_e2 *pbd_e2,
3545 struct eth_tx_parse_2nd_bd *pbd2,
3546 u16 *global_data,
3547 u32 xmit_type)
3548{
Dmitry Kravkove287a752013-03-21 15:38:24 +00003549 u16 hlen_w = 0;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003550 u8 outerip_off, outerip_len = 0;
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003551
Dmitry Kravkove287a752013-03-21 15:38:24 +00003552 /* from outer IP to transport */
3553 hlen_w = (skb_inner_transport_header(skb) -
3554 skb_network_header(skb)) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003555
3556 /* transport len */
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003557 hlen_w += inner_tcp_hdrlen(skb) >> 1;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003558
Dmitry Kravkove287a752013-03-21 15:38:24 +00003559 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003560
Dmitry Kravkove768fb22013-06-02 23:28:41 +00003561 /* outer IP header info */
3562 if (xmit_type & XMIT_CSUM_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003563 struct iphdr *iph = ip_hdr(skb);
Dmitry Kravkov1b4fc0e2013-07-11 15:48:21 +03003564 u32 csum = (__force u32)(~iph->check) -
3565 (__force u32)iph->tot_len -
3566 (__force u32)iph->frag_off;
Yuval Mintzc957d092013-06-25 08:50:11 +03003567
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003568 pbd2->fw_ip_csum_wo_len_flags_frag =
Yuval Mintzc957d092013-06-25 08:50:11 +03003569 bswab16(csum_fold((__force __wsum)csum));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003570 } else {
3571 pbd2->fw_ip_hdr_to_payload_w =
Dmitry Kravkove287a752013-03-21 15:38:24 +00003572 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003573 }
3574
3575 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3576
3577 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3578
3579 if (xmit_type & XMIT_GSO_V4) {
Dmitry Kravkove287a752013-03-21 15:38:24 +00003580 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003581
3582 pbd_e2->data.tunnel_data.pseudo_csum =
3583 bswab16(~csum_tcpudp_magic(
3584 inner_ip_hdr(skb)->saddr,
3585 inner_ip_hdr(skb)->daddr,
3586 0, IPPROTO_TCP, 0));
3587
3588 outerip_len = ip_hdr(skb)->ihl << 1;
3589 } else {
3590 pbd_e2->data.tunnel_data.pseudo_csum =
3591 bswab16(~csum_ipv6_magic(
3592 &inner_ipv6_hdr(skb)->saddr,
3593 &inner_ipv6_hdr(skb)->daddr,
3594 0, IPPROTO_TCP, 0));
3595 }
3596
3597 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3598
3599 *global_data |=
3600 outerip_off |
3601 (!!(xmit_type & XMIT_CSUM_V6) <<
3602 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3603 (outerip_len <<
3604 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3605 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3606 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
Dmitry Kravkov65bc0cf2013-04-28 08:16:02 +00003607
3608 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3609 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3610 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3611 }
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003612}
3613
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003614/* called with netif_tx_lock
3615 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3616 * netif_wake_queue()
3617 */
3618netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3619{
3620 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003621
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003622 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003623 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003624 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003625 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003626 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003627 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003628 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003629 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003630 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003631 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00003632 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003633 dma_addr_t mapping;
3634 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3635 int i;
3636 u8 hlen = 0;
3637 __le16 pkt_size = 0;
3638 struct ethhdr *eth;
3639 u8 mac_type = UNICAST_ADDRESS;
3640
3641#ifdef BNX2X_STOP_ON_ERROR
3642 if (unlikely(bp->panic))
3643 return NETDEV_TX_BUSY;
3644#endif
3645
Ariel Elior6383c0b2011-07-14 08:31:57 +00003646 txq_index = skb_get_queue_mapping(skb);
3647 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003648
Merav Sicron55c11942012-11-07 00:45:48 +00003649 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003650
Merav Sicron65565882012-06-19 07:48:26 +00003651 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003652
3653 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00003654 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003655 txq_index, fp_index, txdata_index); */
3656
Yuval Mintz16a5fd92013-06-02 00:06:18 +00003657 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00003658 DP(NETIF_MSG_TX_QUEUED,
3659 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003660 txdata->cid, fp_index, txdata_index, txdata, fp); */
3661
3662 if (unlikely(bnx2x_tx_avail(bp, txdata) <
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00003663 skb_shinfo(skb)->nr_frags +
3664 BDS_PER_TX_PKT +
3665 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003666 /* Handle special storage cases separately */
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003667 if (txdata->tx_ring_size == 0) {
3668 struct bnx2x_eth_q_stats *q_stats =
3669 bnx2x_fp_qstats(bp, txdata->parent_fp);
3670 q_stats->driver_filtered_tx_pkt++;
3671 dev_kfree_skb(skb);
3672 return NETDEV_TX_OK;
3673 }
Yuval Mintz2de67432013-01-23 03:21:43 +00003674 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3675 netif_tx_stop_queue(txq);
Dmitry Kravkovc96bdc02012-12-02 04:05:48 +00003676 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
Dmitry Kravkov2384d6a2012-10-16 01:28:27 +00003677
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003678 return NETDEV_TX_BUSY;
3679 }
3680
Merav Sicron51c1a582012-03-18 10:33:38 +00003681 DP(NETIF_MSG_TX_QUEUED,
Yuval Mintz04c46732013-01-23 03:21:46 +00003682 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003683 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Yuval Mintz04c46732013-01-23 03:21:46 +00003684 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3685 skb->len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003686
3687 eth = (struct ethhdr *)skb->data;
3688
3689 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3690 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3691 if (is_broadcast_ether_addr(eth->h_dest))
3692 mac_type = BROADCAST_ADDRESS;
3693 else
3694 mac_type = MULTICAST_ADDRESS;
3695 }
3696
Dmitry Kravkov91226792013-03-11 05:17:52 +00003697#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003698 /* First, check if we need to linearize the skb (due to FW
3699 restrictions). No need to check fragmentation if page size > 8K
3700	   (there will be no violation of FW restrictions) */
3701 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3702 /* Statistics of linearization */
3703 bp->lin_cnt++;
3704 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003705 DP(NETIF_MSG_TX_QUEUED,
3706 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003707 dev_kfree_skb_any(skb);
3708 return NETDEV_TX_OK;
3709 }
3710 }
3711#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003712 /* Map skb linear data for DMA */
3713 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3714 skb_headlen(skb), DMA_TO_DEVICE);
3715 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003716 DP(NETIF_MSG_TX_QUEUED,
3717 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003718 dev_kfree_skb_any(skb);
3719 return NETDEV_TX_OK;
3720 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003721 /*
3722 Please read carefully. First we use one BD which we mark as start,
3723 then we have a parsing info BD (used for TSO or xsum),
3724 and only then we have the rest of the TSO BDs.
3725 (don't forget to mark the last one as last,
3726 and to unmap only AFTER you write to the BD ...)
3727	   And above all, all pbd sizes are in words - NOT DWORDS!
3728 */
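	/* Resulting BD chain, roughly: start BD -> parsing BD (E1x or E2
	 * flavour) [-> second parsing BD for tunnelled traffic] -> data BDs
	 * for the linear part and the frags. nbd below starts at 2 and is
	 * bumped as those extra BDs are added.
	 */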
3729
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003730 /* get current pkt produced now - advance it just before sending packet
3731 * since mapping of pages may fail and cause packet to be dropped
3732 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003733 pkt_prod = txdata->tx_pkt_prod;
3734 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003735
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003736 /* get a tx_buf and first BD
3737 * tx_start_bd may be changed during SPLIT,
3738 * but first_bd will always stay first
3739 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003740 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3741 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003742 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003743
3744 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003745
Dmitry Kravkov91226792013-03-11 05:17:52 +00003746 /* header nbd: indirectly zero other flags! */
3747 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003748
3749 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003750 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003751 tx_buf->skb = skb;
3752 tx_buf->flags = 0;
3753
3754 DP(NETIF_MSG_TX_QUEUED,
3755 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003756 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003757
Jesse Grosseab6d182010-10-20 13:56:03 +00003758 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003759 tx_start_bd->vlan_or_ethertype =
3760 cpu_to_le16(vlan_tx_tag_get(skb));
3761 tx_start_bd->bd_flags.as_bitfield |=
3762 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003763 } else {
3764 /* when transmitting in a vf, start bd must hold the ethertype
3765 * for fw to enforce it
3766 */
Dmitry Kravkov91226792013-03-11 05:17:52 +00003767 if (IS_VF(bp))
Ariel Eliordc1ba592013-01-01 05:22:30 +00003768 tx_start_bd->vlan_or_ethertype =
3769 cpu_to_le16(ntohs(eth->h_proto));
Dmitry Kravkov91226792013-03-11 05:17:52 +00003770 else
Ariel Eliordc1ba592013-01-01 05:22:30 +00003771 /* used by FW for packet accounting */
3772 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Ariel Eliordc1ba592013-01-01 05:22:30 +00003773 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003774
Dmitry Kravkov91226792013-03-11 05:17:52 +00003775 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3776
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003777 /* turn on parsing and get a BD */
3778 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003779
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003780 if (xmit_type & XMIT_CSUM)
3781 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003782
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003783 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003784 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003785 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003786
3787 if (xmit_type & XMIT_CSUM_ENC) {
3788 u16 global_data = 0;
3789
3790 /* Set PBD in enc checksum offload case */
3791 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3792 &pbd_e2_parsing_data,
3793 xmit_type);
3794
3795 /* turn on 2nd parsing and get a BD */
3796 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3797
3798 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3799
3800 memset(pbd2, 0, sizeof(*pbd2));
3801
3802 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3803 (skb_inner_network_header(skb) -
3804 skb->data) >> 1;
3805
3806 if (xmit_type & XMIT_GSO_ENC)
3807 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3808 &global_data,
3809 xmit_type);
3810
3811 pbd2->global_data = cpu_to_le16(global_data);
3812
3813			/* add an additional parsing BD indication to the start BD */
3814 SET_FLAG(tx_start_bd->general_data,
3815 ETH_TX_START_BD_PARSE_NBDS, 1);
3816 /* set encapsulation flag in start BD */
3817 SET_FLAG(tx_start_bd->general_data,
3818 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3819 nbd++;
3820 } else if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov91226792013-03-11 05:17:52 +00003821 /* Set PBD in checksum offload case w/o encapsulation */
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003822 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3823 &pbd_e2_parsing_data,
3824 xmit_type);
Dmitry Kravkova848ade2013-03-18 06:51:03 +00003825 }
Ariel Eliordc1ba592013-01-01 05:22:30 +00003826
Dmitry Kravkov91226792013-03-11 05:17:52 +00003827		/* Add the MACs to the parsing BD if this is a VF */
3828 if (IS_VF(bp)) {
3829 /* override GRE parameters in BD */
3830 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3831 &pbd_e2->data.mac_addr.src_mid,
3832 &pbd_e2->data.mac_addr.src_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003833 eth->h_source);
Dmitry Kravkov91226792013-03-11 05:17:52 +00003834
3835 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3836 &pbd_e2->data.mac_addr.dst_mid,
3837 &pbd_e2->data.mac_addr.dst_lo,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003838 eth->h_dest);
3839 }
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003840
3841 SET_FLAG(pbd_e2_parsing_data,
3842 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003843 } else {
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003844 u16 global_data = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003845 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003846 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3847 /* Set PBD in checksum offload case */
3848 if (xmit_type & XMIT_CSUM)
3849 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003850
Yuval Mintz96bed4b2012-10-01 03:46:19 +00003851 SET_FLAG(global_data,
3852 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3853 pbd_e1x->global_data |= cpu_to_le16(global_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003854 }
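	/* To summarize the split above: chips newer than 57711 (non-E1x) use
	 * the eth_tx_parse_bd_e2 parsing BD, which also carries the MAC
	 * addresses (mandatory for VFs) and a packed parsing_data word, while
	 * E1x chips use the older eth_tx_parse_bd_e1x layout.
	 */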
3855
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003856 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003857 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3858 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003859 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3860 pkt_size = tx_start_bd->nbytes;
3861
Merav Sicron51c1a582012-03-18 10:33:38 +00003862 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003863 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003864 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003865 le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003866 tx_start_bd->bd_flags.as_bitfield,
3867 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003868
3869 if (xmit_type & XMIT_GSO) {
3870
3871 DP(NETIF_MSG_TX_QUEUED,
3872 "TSO packet len %d hlen %d total len %d tso size %d\n",
3873 skb->len, hlen, skb_headlen(skb),
3874 skb_shinfo(skb)->gso_size);
3875
3876 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3877
Dmitry Kravkov91226792013-03-11 05:17:52 +00003878 if (unlikely(skb_headlen(skb) > hlen)) {
3879 nbd++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003880 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3881 &tx_start_bd, hlen,
Dmitry Kravkov91226792013-03-11 05:17:52 +00003882 bd_prod);
3883 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003884 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003885 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3886 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003887 else
Yuval Mintz44dbc782013-06-03 02:59:57 +00003888 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003889 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003890
3891 /* Set the PBD's parsing_data field if not zero
3892 * (for the chips newer than 57711).
3893 */
3894 if (pbd_e2_parsing_data)
3895 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3896
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003897 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3898
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003899 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003900 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3901 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3902
Eric Dumazet9e903e02011-10-18 21:00:24 +00003903 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3904 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003905 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003906 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003907
Merav Sicron51c1a582012-03-18 10:33:38 +00003908 DP(NETIF_MSG_TX_QUEUED,
3909 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003910
3911			/* we need to unmap all buffers already mapped
3912			 * for this SKB;
3913			 * first_bd->nbd needs to be properly updated
3914			 * before the call to bnx2x_free_tx_pkt
3915			 */
3916 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003917 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003918 TX_BD(txdata->tx_pkt_prod),
3919 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003920 return NETDEV_TX_OK;
3921 }
3922
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003923 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003924 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003925 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003926 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003927
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003928 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3929 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003930 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3931 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003932 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003933
3934 DP(NETIF_MSG_TX_QUEUED,
3935 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3936 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3937 le16_to_cpu(tx_data_bd->nbytes));
3938 }
3939
3940 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3941
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003942 /* update with actual num BDs */
3943 first_bd->nbd = cpu_to_le16(nbd);
3944
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003945 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3946
3947 /* now send a tx doorbell, counting the next BD
3948 * if the packet contains or ends with it
3949 */
3950 if (TX_BD_POFF(bd_prod) < nbd)
3951 nbd++;
3952
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003953	/* total_pkt_bytes should be set on the first data BD if
3954	 * it's not an LSO packet and there is more than one
3955	 * data BD. In this case pkt_size is limited by the MTU value.
3956	 * However, we prefer to set it for an LSO packet as well (while
3957	 * we don't have to) in order to save some CPU cycles in the
3958	 * non-LSO case, which we care much more about.
3959	 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003960 if (total_pkt_bd != NULL)
3961 total_pkt_bd->total_pkt_bytes = pkt_size;
3962
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003963 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003964 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003965 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003966 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3967 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3968 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3969 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003970 if (pbd_e2)
3971 DP(NETIF_MSG_TX_QUEUED,
3972 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
Dmitry Kravkov91226792013-03-11 05:17:52 +00003973 pbd_e2,
3974 pbd_e2->data.mac_addr.dst_hi,
3975 pbd_e2->data.mac_addr.dst_mid,
3976 pbd_e2->data.mac_addr.dst_lo,
3977 pbd_e2->data.mac_addr.src_hi,
3978 pbd_e2->data.mac_addr.src_mid,
3979 pbd_e2->data.mac_addr.src_lo,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003980 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003981 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3982
Tom Herbert2df1a702011-11-28 16:33:37 +00003983 netdev_tx_sent_queue(txq, skb->len);
3984
Willem de Bruijn8373c572012-04-27 09:04:06 +00003985 skb_tx_timestamp(skb);
3986
Ariel Elior6383c0b2011-07-14 08:31:57 +00003987 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003988 /*
3989 * Make sure that the BD data is updated before updating the producer
3990 * since FW might read the BD right after the producer is updated.
3991 * This is only applicable for weak-ordered memory model archs such
3992 * as IA-64. The following barrier is also mandatory since FW will
3993 * assumes packets must have BDs.
3994 */
3995 wmb();
3996
Ariel Elior6383c0b2011-07-14 08:31:57 +00003997 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003998 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003999
Ariel Elior6383c0b2011-07-14 08:31:57 +00004000 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004001
4002 mmiowb();
4003
Ariel Elior6383c0b2011-07-14 08:31:57 +00004004 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004005
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004006 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004007 netif_tx_stop_queue(txq);
4008
4009 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4010 * ordering of set_bit() in netif_tx_stop_queue() and read of
4011		 * txdata->tx_bd_cons */
4012 smp_mb();
4013
Barak Witkowski15192a82012-06-19 07:48:28 +00004014 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
Dmitry Kravkov7df2dc62012-06-25 22:32:50 +00004015 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004016 netif_tx_wake_queue(txq);
4017 }
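	/* The stop -> smp_mb() -> re-check -> wake sequence above closes the
	 * race with the completion path: if bnx2x_tx_int() freed descriptors
	 * between the availability check and netif_tx_stop_queue(), the
	 * re-check wakes the queue back up.
	 */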
Ariel Elior6383c0b2011-07-14 08:31:57 +00004018 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004019
4020 return NETDEV_TX_OK;
4021}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004022
Ariel Elior6383c0b2011-07-14 08:31:57 +00004023/**
4024 * bnx2x_setup_tc - routine to configure net_device for multi tc
4025 *
4026 * @dev:	net device to configure
4027 * @num_tc:	number of traffic classes to enable
4028 *
4029 * callback connected to the ndo_setup_tc function pointer
4030 */
4031int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4032{
4033 int cos, prio, count, offset;
4034 struct bnx2x *bp = netdev_priv(dev);
4035
4036 /* setup tc must be called under rtnl lock */
4037 ASSERT_RTNL();
4038
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004039 /* no traffic classes requested. Aborting */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004040 if (!num_tc) {
4041 netdev_reset_tc(dev);
4042 return 0;
4043 }
4044
4045 /* requested to support too many traffic classes */
4046 if (num_tc > bp->max_cos) {
Yuval Mintz6bf07b82013-06-02 00:06:20 +00004047 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
Merav Sicron51c1a582012-03-18 10:33:38 +00004048 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004049 return -EINVAL;
4050 }
4051
4052 /* declare amount of supported traffic classes */
4053 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004054 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004055 return -EINVAL;
4056 }
4057
4058 /* configure priority to traffic class mapping */
4059 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4060 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00004061 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4062 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004063 prio, bp->prio_to_cos[prio]);
4064 }
4065
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004066 /* Use this configuration to differentiate tc0 from other COSes
Ariel Elior6383c0b2011-07-14 08:31:57 +00004067 This can be used for ets or pfc, and save the effort of setting
4068	   up a multi-class queue disc or negotiating DCBX with a switch
4069 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00004070 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004071 for (prio = 1; prio < 16; prio++) {
4072 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00004073 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004074 } */
4075
4076 /* configure traffic class to transmission queue mapping */
4077 for (cos = 0; cos < bp->max_cos; cos++) {
4078 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00004079 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004080 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00004081 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4082 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004083 cos, offset, count);
4084 }
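	/* In effect, traffic class N is backed by the queue range starting at
	 * N * BNX2X_NUM_NON_CNIC_QUEUES(bp) and spanning
	 * BNX2X_NUM_ETH_QUEUES(bp) queues, while priorities are spread over
	 * the classes via bp->prio_to_cos[].
	 */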
4085
4086 return 0;
4087}
4088
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004089/* called with rtnl_lock */
4090int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4091{
4092 struct sockaddr *addr = p;
4093 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004094 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004095
Merav Sicron51c1a582012-03-18 10:33:38 +00004096 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4097 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004098 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004099 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004100
Barak Witkowskia3348722012-04-23 03:04:46 +00004101 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4102 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004103 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004104 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004105 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004106
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004107 if (netif_running(dev)) {
4108 rc = bnx2x_set_eth_mac(bp, false);
4109 if (rc)
4110 return rc;
4111 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004112
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004113 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4114
4115 if (netif_running(dev))
4116 rc = bnx2x_set_eth_mac(bp, true);
4117
4118 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004119}
4120
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004121static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4122{
4123 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4124 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004125 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004126
4127 /* Common */
Merav Sicron55c11942012-11-07 00:45:48 +00004128
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004129 if (IS_FCOE_IDX(fp_index)) {
4130 memset(sb, 0, sizeof(union host_hc_status_block));
4131 fp->status_blk_mapping = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004132 } else {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004133 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004134 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004135 BNX2X_PCI_FREE(sb->e2_sb,
4136 bnx2x_fp(bp, fp_index,
4137 status_blk_mapping),
4138 sizeof(struct host_hc_status_block_e2));
4139 else
4140 BNX2X_PCI_FREE(sb->e1x_sb,
4141 bnx2x_fp(bp, fp_index,
4142 status_blk_mapping),
4143 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004144 }
Merav Sicron55c11942012-11-07 00:45:48 +00004145
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004146 /* Rx */
4147 if (!skip_rx_queue(bp, fp_index)) {
4148 bnx2x_free_rx_bds(fp);
4149
4150 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4151 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4152 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4153 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4154 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4155
4156 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4157 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4158 sizeof(struct eth_fast_path_rx_cqe) *
4159 NUM_RCQ_BD);
4160
4161 /* SGE ring */
4162 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4163 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4164 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4165 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4166 }
4167
4168 /* Tx */
4169 if (!skip_tx_queue(bp, fp_index)) {
4170 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004171 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004172 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004173
Merav Sicron51c1a582012-03-18 10:33:38 +00004174 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00004175 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004176 fp_index, cos, txdata->cid);
4177
4178 BNX2X_FREE(txdata->tx_buf_ring);
4179 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4180 txdata->tx_desc_mapping,
4181 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4182 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004183 }
4184 /* end of fastpath */
4185}
4186
Merav Sicron55c11942012-11-07 00:45:48 +00004187void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4188{
4189 int i;
4190 for_each_cnic_queue(bp, i)
4191 bnx2x_free_fp_mem_at(bp, i);
4192}
4193
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004194void bnx2x_free_fp_mem(struct bnx2x *bp)
4195{
4196 int i;
Merav Sicron55c11942012-11-07 00:45:48 +00004197 for_each_eth_queue(bp, i)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004198 bnx2x_free_fp_mem_at(bp, i);
4199}
4200
Eric Dumazet1191cb82012-04-27 21:39:21 +00004201static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004202{
4203 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004204 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004205 bnx2x_fp(bp, index, sb_index_values) =
4206 (__le16 *)status_blk.e2_sb->sb.index_values;
4207 bnx2x_fp(bp, index, sb_running_index) =
4208 (__le16 *)status_blk.e2_sb->sb.running_index;
4209 } else {
4210 bnx2x_fp(bp, index, sb_index_values) =
4211 (__le16 *)status_blk.e1x_sb->sb.index_values;
4212 bnx2x_fp(bp, index, sb_running_index) =
4213 (__le16 *)status_blk.e1x_sb->sb.running_index;
4214 }
4215}
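/* The shortcuts cached above point straight into the E2 or E1x status block
 * layout, so the fast path can read index values and the running index
 * without branching on the chip type each time.
 */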
4216
Eric Dumazet1191cb82012-04-27 21:39:21 +00004217/* Returns the number of actually allocated BDs */
4218static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4219 int rx_ring_size)
4220{
4221 struct bnx2x *bp = fp->bp;
4222 u16 ring_prod, cqe_ring_prod;
4223 int i, failure_cnt = 0;
4224
4225 fp->rx_comp_cons = 0;
4226 cqe_ring_prod = ring_prod = 0;
4227
4228	/* This routine is called only during fp init, so
4229 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4230 */
4231 for (i = 0; i < rx_ring_size; i++) {
Michal Schmidt996dedb2013-09-05 22:13:09 +02004232 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
Eric Dumazet1191cb82012-04-27 21:39:21 +00004233 failure_cnt++;
4234 continue;
4235 }
4236 ring_prod = NEXT_RX_IDX(ring_prod);
4237 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4238 WARN_ON(ring_prod <= (i - failure_cnt));
4239 }
4240
4241 if (failure_cnt)
4242 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4243 i - failure_cnt, fp->index);
4244
4245 fp->rx_bd_prod = ring_prod;
4246 /* Limit the CQE producer by the CQE ring size */
4247 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4248 cqe_ring_prod);
4249 fp->rx_pkt = fp->rx_calls = 0;
4250
Barak Witkowski15192a82012-06-19 07:48:28 +00004251 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
Eric Dumazet1191cb82012-04-27 21:39:21 +00004252
4253 return i - failure_cnt;
4254}
4255
4256static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4257{
4258 int i;
4259
4260 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4261 struct eth_rx_cqe_next_page *nextpg;
4262
4263 nextpg = (struct eth_rx_cqe_next_page *)
4264 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4265 nextpg->addr_hi =
4266 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4267 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4268 nextpg->addr_lo =
4269 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4270 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4271 }
4272}
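/* The loop above rewrites the last CQE of every RCQ page as a "next page"
 * pointer; the (i % NUM_RCQ_RINGS) arithmetic makes the final page point
 * back to the first one, so the CQ pages form a circular ring.
 */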
4273
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004274static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4275{
4276 union host_hc_status_block *sb;
4277 struct bnx2x_fastpath *fp = &bp->fp[index];
4278 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004279 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04004280 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004281
Barak Witkowskia3348722012-04-23 03:04:46 +00004282 if (!bp->rx_ring_size &&
4283 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004284 rx_ring_size = MIN_RX_SIZE_NONTPA;
4285 bp->rx_ring_size = rx_ring_size;
Merav Sicron55c11942012-11-07 00:45:48 +00004286 } else if (!bp->rx_ring_size) {
David S. Miller8decf862011-09-22 03:23:13 -04004287 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4288
Yuval Mintz065f8b92012-10-03 04:22:59 +00004289 if (CHIP_IS_E3(bp)) {
4290 u32 cfg = SHMEM_RD(bp,
4291 dev_info.port_hw_config[BP_PORT(bp)].
4292 default_cfg);
4293
4294 /* Decrease ring size for 1G functions */
4295 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4296 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4297 rx_ring_size /= 10;
4298 }
Mintz Yuvald760fc32012-02-15 02:10:28 +00004299
David S. Miller8decf862011-09-22 03:23:13 -04004300 /* allocate at least number of buffers required by FW */
4301 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4302 MIN_RX_SIZE_TPA, rx_ring_size);
4303
4304 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00004305 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04004306 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004307
Yuval Mintz04c46732013-01-23 03:21:46 +00004308 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
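	/* To summarize the sizing above: an explicit bp->rx_ring_size wins;
	 * otherwise MAX_RX_AVAIL is spread over the RX queues, shrunk for 1G
	 * SerDes ports on E3 and clamped to the FW minimum (TPA or non-TPA),
	 * while storage-only MF functions always get MIN_RX_SIZE_NONTPA.
	 */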
4309
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004310 /* Common */
4311 sb = &bnx2x_fp(bp, index, status_blk);
Merav Sicron55c11942012-11-07 00:45:48 +00004312
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004313 if (!IS_FCOE_IDX(index)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004314 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004315 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004316 BNX2X_PCI_ALLOC(sb->e2_sb,
4317 &bnx2x_fp(bp, index, status_blk_mapping),
4318 sizeof(struct host_hc_status_block_e2));
4319 else
4320 BNX2X_PCI_ALLOC(sb->e1x_sb,
4321 &bnx2x_fp(bp, index, status_blk_mapping),
4322 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004323 }
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004324
4325 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4326 * set shortcuts for it.
4327 */
4328 if (!IS_FCOE_IDX(index))
4329 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004330
4331 /* Tx */
4332 if (!skip_tx_queue(bp, index)) {
4333 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00004334 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00004335 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00004336
Merav Sicron51c1a582012-03-18 10:33:38 +00004337 DP(NETIF_MSG_IFUP,
4338 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00004339 index, cos);
4340
4341 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004342 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004343 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4344 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004345 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004346 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004347 }
4348
4349 /* Rx */
4350 if (!skip_rx_queue(bp, index)) {
4351 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4352 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4353 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4354 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4355 &bnx2x_fp(bp, index, rx_desc_mapping),
4356 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4357
Dmitry Kravkov75b29452013-06-19 01:36:05 +03004358 /* Seed all CQEs by 1s */
4359 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4360 &bnx2x_fp(bp, index, rx_comp_mapping),
4361 sizeof(struct eth_fast_path_rx_cqe) *
4362 NUM_RCQ_BD);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004363
4364 /* SGE ring */
4365 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4366 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4367 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4368 &bnx2x_fp(bp, index, rx_sge_mapping),
4369 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4370 /* RX BD ring */
4371 bnx2x_set_next_page_rx_bd(fp);
4372
4373 /* CQ ring */
4374 bnx2x_set_next_page_rx_cq(fp);
4375
4376 /* BDs */
4377 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4378 if (ring_size < rx_ring_size)
4379 goto alloc_mem_err;
4380 }
4381
4382 return 0;
4383
4384/* handles low memory cases */
4385alloc_mem_err:
4386 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4387 index, ring_size);
4388	/* FW will drop all packets if the queue is not big enough;
4389	 * in these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00004390 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004391 */
4392 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00004393 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004394 /* release memory allocated for this queue */
4395 bnx2x_free_fp_mem_at(bp, index);
4396 return -ENOMEM;
4397 }
4398 return 0;
4399}
4400
Merav Sicron55c11942012-11-07 00:45:48 +00004401int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004402{
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004403 if (!NO_FCOE(bp))
4404 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00004405 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00004406 /* we will fail load process instead of mark
4407 * NO_FCOE_FLAG
4408 */
4409 return -ENOMEM;
Merav Sicron55c11942012-11-07 00:45:48 +00004410
4411 return 0;
4412}
4413
4414int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4415{
4416 int i;
4417
4418 /* 1. Allocate FP for leading - fatal if error
4419 * 2. Allocate RSS - fix number of queues if error
4420 */
4421
4422 /* leading */
4423 if (bnx2x_alloc_fp_mem_at(bp, 0))
4424 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004425
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004426 /* RSS */
4427 for_each_nondefault_eth_queue(bp, i)
4428 if (bnx2x_alloc_fp_mem_at(bp, i))
4429 break;
4430
4431 /* handle memory failures */
4432 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4433 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4434
4435 WARN_ON(delta < 0);
Yuval Mintz4864a162013-01-10 04:53:39 +00004436 bnx2x_shrink_eth_fp(bp, delta);
Merav Sicron55c11942012-11-07 00:45:48 +00004437 if (CNIC_SUPPORT(bp))
4438			/* move non-eth FPs next to the last eth FP;
4439			 * this must be done in that order:
4440 * FCOE_IDX < FWD_IDX < OOO_IDX
4441 */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004442
Merav Sicron55c11942012-11-07 00:45:48 +00004443		/* move FCoE fp even if NO_FCOE_FLAG is on */
4444 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4445 bp->num_ethernet_queues -= delta;
4446 bp->num_queues = bp->num_ethernet_queues +
4447 bp->num_cnic_queues;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00004448 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4449 bp->num_queues + delta, bp->num_queues);
4450 }
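	/* When some RSS queues failed to allocate, the block above shrinks
	 * the ETH queue count by 'delta' and (with CNIC support) slides the
	 * FCoE fastpath down next to the last remaining ETH queue so that
	 * the CNIC queue indices stay contiguous.
	 */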
4451
4452 return 0;
4453}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00004454
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004455void bnx2x_free_mem_bp(struct bnx2x *bp)
4456{
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004457 int i;
4458
4459 for (i = 0; i < bp->fp_array_size; i++)
4460 kfree(bp->fp[i].tpa_info);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004461 kfree(bp->fp);
Barak Witkowski15192a82012-06-19 07:48:28 +00004462 kfree(bp->sp_objs);
4463 kfree(bp->fp_stats);
Merav Sicron65565882012-06-19 07:48:26 +00004464 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004465 kfree(bp->msix_table);
4466 kfree(bp->ilt);
4467}
4468
Bill Pemberton0329aba2012-12-03 09:24:24 -05004469int bnx2x_alloc_mem_bp(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004470{
4471 struct bnx2x_fastpath *fp;
4472 struct msix_entry *tbl;
4473 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00004474 int msix_table_size = 0;
Merav Sicron55c11942012-11-07 00:45:48 +00004475 int fp_array_size, txq_array_size;
Barak Witkowski15192a82012-06-19 07:48:28 +00004476 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004477
Ariel Elior6383c0b2011-07-14 08:31:57 +00004478 /*
4479	 * The biggest MSI-X table we might need is the maximum number of fast
Yuval Mintz2de67432013-01-23 03:21:43 +00004480	 * path IGU SBs plus the default SB (for PF only).
Ariel Elior6383c0b2011-07-14 08:31:57 +00004481 */
Ariel Elior1ab44342013-01-01 05:22:23 +00004482 msix_table_size = bp->igu_sb_cnt;
4483 if (IS_PF(bp))
4484 msix_table_size++;
4485 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
Ariel Elior6383c0b2011-07-14 08:31:57 +00004486
4487 /* fp array: RSS plus CNIC related L2 queues */
Merav Sicron55c11942012-11-07 00:45:48 +00004488 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004489 bp->fp_array_size = fp_array_size;
4490 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
Barak Witkowski15192a82012-06-19 07:48:28 +00004491
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004492 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004493 if (!fp)
4494 goto alloc_err;
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004495 for (i = 0; i < bp->fp_array_size; i++) {
Barak Witkowski15192a82012-06-19 07:48:28 +00004496 fp[i].tpa_info =
4497 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4498 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4499 if (!(fp[i].tpa_info))
4500 goto alloc_err;
4501 }
4502
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004503 bp->fp = fp;
4504
Barak Witkowski15192a82012-06-19 07:48:28 +00004505 /* allocate sp objs */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004506 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
Barak Witkowski15192a82012-06-19 07:48:28 +00004507 GFP_KERNEL);
4508 if (!bp->sp_objs)
4509 goto alloc_err;
4510
4511 /* allocate fp_stats */
Dmitry Kravkovc3146eb2013-01-23 03:21:48 +00004512 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
Barak Witkowski15192a82012-06-19 07:48:28 +00004513 GFP_KERNEL);
4514 if (!bp->fp_stats)
4515 goto alloc_err;
4516
Merav Sicron65565882012-06-19 07:48:26 +00004517 /* Allocate memory for the transmission queues array */
Merav Sicron55c11942012-11-07 00:45:48 +00004518 txq_array_size =
4519 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4520 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4521
4522 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4523 GFP_KERNEL);
Merav Sicron65565882012-06-19 07:48:26 +00004524 if (!bp->bnx2x_txq)
4525 goto alloc_err;
4526
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004527 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00004528 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004529 if (!tbl)
4530 goto alloc_err;
4531 bp->msix_table = tbl;
4532
4533 /* ilt */
4534 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4535 if (!ilt)
4536 goto alloc_err;
4537 bp->ilt = ilt;
4538
4539 return 0;
4540alloc_err:
4541 bnx2x_free_mem_bp(bp);
4542 return -ENOMEM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004543}
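/* Roughly, bnx2x_alloc_mem_bp() sizes the fastpath array for the maximum
 * RSS count plus the CNIC L2 queues, allocates per-fp tpa_info arrays and
 * the parallel sp_objs/fp_stats arrays, then the tx queue array, MSI-X
 * table and ILT; any failure unwinds through bnx2x_free_mem_bp().
 */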
4544
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00004545int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00004546{
4547 struct bnx2x *bp = netdev_priv(dev);
4548
4549 if (unlikely(!netif_running(dev)))
4550 return 0;
4551
Yuval Mintz5d07d862012-09-13 02:56:21 +00004552 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
Michał Mirosław66371c42011-04-12 09:38:23 +00004553 return bnx2x_nic_load(bp, LOAD_NORMAL);
4554}
4555
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004556int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4557{
4558 u32 sel_phy_idx = 0;
4559 if (bp->link_params.num_phys <= 1)
4560 return INT_PHY;
4561
4562 if (bp->link_vars.link_up) {
4563 sel_phy_idx = EXT_PHY1;
4564 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4565 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4566 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4567 sel_phy_idx = EXT_PHY2;
4568 } else {
4569
4570 switch (bnx2x_phy_selection(&bp->link_params)) {
4571 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4572 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4573 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4574 sel_phy_idx = EXT_PHY1;
4575 break;
4576 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4577 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4578 sel_phy_idx = EXT_PHY2;
4579 break;
4580 }
4581 }
4582
4583 return sel_phy_idx;
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004584}
4585int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4586{
4587 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4588 /*
Yuval Mintz2de67432013-01-23 03:21:43 +00004589	 * The selected activated PHY index always refers to the state after
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00004590	 * swapping (in case PHY swapping is enabled), so when swapping is
4591	 * enabled we need to reverse the configuration.
4592 */
4593
4594 if (bp->link_params.multi_phy_config &
4595 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4596 if (sel_phy_idx == EXT_PHY1)
4597 sel_phy_idx = EXT_PHY2;
4598 else if (sel_phy_idx == EXT_PHY2)
4599 sel_phy_idx = EXT_PHY1;
4600 }
4601 return LINK_CONFIG_IDX(sel_phy_idx);
4602}
4603
Merav Sicron55c11942012-11-07 00:45:48 +00004604#ifdef NETDEV_FCOE_WWNN
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004605int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4606{
4607 struct bnx2x *bp = netdev_priv(dev);
4608 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4609
4610 switch (type) {
4611 case NETDEV_FCOE_WWNN:
4612 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4613 cp->fcoe_wwn_node_name_lo);
4614 break;
4615 case NETDEV_FCOE_WWPN:
4616 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4617 cp->fcoe_wwn_port_name_lo);
4618 break;
4619 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00004620 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00004621 return -EINVAL;
4622 }
4623
4624 return 0;
4625}
4626#endif
4627
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004628/* called with rtnl_lock */
4629int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4630{
4631 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004632
4633 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004634 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004635 return -EAGAIN;
4636 }
4637
4638 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00004639 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4640 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004641 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00004642 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004643
4644 /* This does not race with packet allocation
4645 * because the actual alloc size is
4646 * only updated as part of load
4647 */
4648 dev->mtu = new_mtu;
4649
Michał Mirosław66371c42011-04-12 09:38:23 +00004650 return bnx2x_reload_if_running(dev);
4651}
4652
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004653netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004654 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004655{
4656 struct bnx2x *bp = netdev_priv(dev);
4657
4658 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004659 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004660 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004661 features &= ~NETIF_F_GRO;
4662 }
Michał Mirosław66371c42011-04-12 09:38:23 +00004663
4664 return features;
4665}
4666
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004667int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00004668{
4669 struct bnx2x *bp = netdev_priv(dev);
4670 u32 flags = bp->flags;
Eric Dumazet8802f572013-05-18 07:14:53 +00004671 u32 changes;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004672 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00004673
4674 if (features & NETIF_F_LRO)
4675 flags |= TPA_ENABLE_FLAG;
4676 else
4677 flags &= ~TPA_ENABLE_FLAG;
4678
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00004679 if (features & NETIF_F_GRO)
4680 flags |= GRO_ENABLE_FLAG;
4681 else
4682 flags &= ~GRO_ENABLE_FLAG;
4683
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004684 if (features & NETIF_F_LOOPBACK) {
4685 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4686 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4687 bnx2x_reload = true;
4688 }
4689 } else {
4690 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4691 bp->link_params.loopback_mode = LOOPBACK_NONE;
4692 bnx2x_reload = true;
4693 }
4694 }
4695
Eric Dumazet8802f572013-05-18 07:14:53 +00004696 changes = flags ^ bp->flags;
4697
Yuval Mintz16a5fd92013-06-02 00:06:18 +00004698 /* if GRO is changed while LRO is enabled, don't force a reload */
Eric Dumazet8802f572013-05-18 07:14:53 +00004699 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4700 changes &= ~GRO_ENABLE_FLAG;
4701
4702 if (changes)
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004703 bnx2x_reload = true;
Eric Dumazet8802f572013-05-18 07:14:53 +00004704
4705 bp->flags = flags;
Michał Mirosław66371c42011-04-12 09:38:23 +00004706
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00004707 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00004708 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4709 return bnx2x_reload_if_running(dev);
4710 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004711 }
4712
Michał Mirosław66371c42011-04-12 09:38:23 +00004713 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004714}
4715
4716void bnx2x_tx_timeout(struct net_device *dev)
4717{
4718 struct bnx2x *bp = netdev_priv(dev);
4719
4720#ifdef BNX2X_STOP_ON_ERROR
4721 if (!bp->panic)
4722 bnx2x_panic();
4723#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00004724
4725 smp_mb__before_clear_bit();
4726 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4727 smp_mb__after_clear_bit();
4728
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004729	/* This allows the netif to be shut down gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00004730 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004731}
4732
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004733int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4734{
4735 struct net_device *dev = pci_get_drvdata(pdev);
4736 struct bnx2x *bp;
4737
4738 if (!dev) {
4739 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4740 return -ENODEV;
4741 }
4742 bp = netdev_priv(dev);
4743
4744 rtnl_lock();
4745
4746 pci_save_state(pdev);
4747
4748 if (!netif_running(dev)) {
4749 rtnl_unlock();
4750 return 0;
4751 }
4752
4753 netif_device_detach(dev);
4754
Yuval Mintz5d07d862012-09-13 02:56:21 +00004755 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004756
4757 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4758
4759 rtnl_unlock();
4760
4761 return 0;
4762}
4763
4764int bnx2x_resume(struct pci_dev *pdev)
4765{
4766 struct net_device *dev = pci_get_drvdata(pdev);
4767 struct bnx2x *bp;
4768 int rc;
4769
4770 if (!dev) {
4771 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4772 return -ENODEV;
4773 }
4774 bp = netdev_priv(dev);
4775
4776 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00004777 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004778 return -EAGAIN;
4779 }
4780
4781 rtnl_lock();
4782
4783 pci_restore_state(pdev);
4784
4785 if (!netif_running(dev)) {
4786 rtnl_unlock();
4787 return 0;
4788 }
4789
4790 bnx2x_set_power_state(bp, PCI_D0);
4791 netif_device_attach(dev);
4792
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004793 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4794
4795 rtnl_unlock();
4796
4797 return rc;
4798}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004799
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004800void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4801 u32 cid)
4802{
Ariel Eliorb9871bc2013-09-04 14:09:21 +03004803 if (!cxt) {
4804 BNX2X_ERR("bad context pointer %p\n", cxt);
4805 return;
4806 }
4807
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004808 /* ustorm cxt validation */
4809 cxt->ustorm_ag_context.cdu_usage =
4810 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4811 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4812 /* xcontext validation */
4813 cxt->xstorm_ag_context.cdu_reserved =
4814 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4815 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4816}
4817
Eric Dumazet1191cb82012-04-27 21:39:21 +00004818static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4819 u8 fw_sb_id, u8 sb_index,
4820 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004821{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004822 u32 addr = BAR_CSTRORM_INTMEM +
4823 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4824 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00004825 DP(NETIF_MSG_IFUP,
4826 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4827 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004828}
4829
Eric Dumazet1191cb82012-04-27 21:39:21 +00004830static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4831 u16 fw_sb_id, u8 sb_index,
4832 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004833{
4834 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4835 u32 addr = BAR_CSTRORM_INTMEM +
4836 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004837 u8 flags = REG_RD8(bp, addr);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004838 /* clear and set */
4839 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4840 flags |= enable_flag;
Ariel Elior0c14e5c2013-04-17 22:49:06 +00004841 REG_WR8(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00004842 DP(NETIF_MSG_IFUP,
4843 "port %x fw_sb_id %d sb_index %d disable %d\n",
4844 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03004845}
4846
4847void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4848 u8 sb_index, u8 disable, u16 usec)
4849{
4850 int port = BP_PORT(bp);
4851 u8 ticks = usec / BNX2X_BTR;
4852
4853 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4854
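	/* ticks = usec / BNX2X_BTR converts the requested interval into host
	 * coalescing timer ticks (assuming BNX2X_BTR is the tick resolution
	 * in usec); note that usec == 0 implicitly disables this index even
	 * if 'disable' was not set by the caller.
	 */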
4855 disable = disable ? 1 : (usec ? 0 : 1);
4856 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4857}