Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Ariel Elior85b26ea2012-01-26 06:01:54 +00003 * Copyright (c) 2007-2012 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Joe Perchesf1deab52011-08-14 12:16:21 +000018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000021#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000022#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000023#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000024#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070025#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000026#include <linux/firmware.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000027#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000028#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000029#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000030#include "bnx2x_sp.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000031
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030032
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000033
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000034/**
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000035 * bnx2x_move_fp - move content of the fastpath structure.
36 *
37 * @bp: driver handle
38 * @from: source FP index
39 * @to: destination FP index
40 *
41 * Makes sure the contents of bp->fp[to].napi are kept
Ariel Elior72754082011-11-13 04:34:31 +000042 * intact. This is done by first copying the napi struct from
43 * the target to the source, and then memcpying the entire
44 * source onto the target
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000045 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
Ariel Elior72754082011-11-13 04:34:31 +000050
51 /* Copy the NAPI object as it has been already initialized */
52 from_fp->napi = to_fp->napi;
53
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000054 /* Move bnx2x_fastpath contents */
55 memcpy(to_fp, from_fp, sizeof(*to_fp));
56 to_fp->index = to;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000057}
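/* Editorial sketch (not part of the original source) of why the copy order
 * above matters: the already-initialized NAPI object of slot "to" must
 * survive the move, so it is first preserved inside the source entry and
 * only then is the whole source fastpath copied over the target:
 *
 *   from_fp->napi = to_fp->napi;             preserve target's NAPI
 *   memcpy(to_fp, from_fp, sizeof(*to_fp));  clone everything else
 *   to_fp->index  = to;                      fix up the queue index
 */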
58
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030059int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
60
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000061/* free skb in the packet ring at pos idx
62 * return idx of last bd freed
63 */
Ariel Elior6383c0b2011-07-14 08:31:57 +000064static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +000065 u16 idx, unsigned int *pkts_compl,
66 unsigned int *bytes_compl)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000067{
Ariel Elior6383c0b2011-07-14 08:31:57 +000068 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000069 struct eth_tx_start_bd *tx_start_bd;
70 struct eth_tx_bd *tx_data_bd;
71 struct sk_buff *skb = tx_buf->skb;
72 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
73 int nbd;
74
75 /* prefetch skb end pointer to speedup dev_kfree_skb() */
76 prefetch(&skb->end);
77
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030078 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +000079 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000080
81 /* unmap first bd */
82 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +000083 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000084 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +000085 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000086
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030087
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000088 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
89#ifdef BNX2X_STOP_ON_ERROR
90 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
91 BNX2X_ERR("BAD nbd!\n");
92 bnx2x_panic();
93 }
94#endif
95 new_cons = nbd + tx_buf->first_bd;
96
97 /* Get the next bd */
98 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
99
100 /* Skip a parse bd... */
101 --nbd;
102 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
103
104 /* ...and the TSO split header bd since they have no mapping */
105 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
106 --nbd;
107 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
108 }
109
110 /* now free frags */
111 while (nbd > 0) {
112
113 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000114 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000115 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
116 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
117 if (--nbd)
118 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
119 }
120
121 /* release skb */
122 WARN_ON(!skb);
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000123 if (likely(skb)) {
Tom Herbert2df1a702011-11-28 16:33:37 +0000124 (*pkts_compl)++;
125 (*bytes_compl) += skb->len;
126 }
Yuval Mintzd8290ae2012-03-18 10:33:37 +0000127
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000128 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000129 tx_buf->first_bd = 0;
130 tx_buf->skb = NULL;
131
132 return new_cons;
133}
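/* Editorial note (added by the editor, not present in the original file):
 * the bookkeeping above walks a per-packet BD chain of the form
 *
 *   start BD -> parse BD -> [TSO split-header BD] -> data BD ... data BD
 *
 * Only the start BD and the data BDs carry DMA mappings, which is why the
 * parse BD and the optional split-header BD are skipped with "--nbd" and a
 * NEXT_TX_IDX() advance but no dma_unmap call.
 */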
134
Ariel Elior6383c0b2011-07-14 08:31:57 +0000135int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000136{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000137 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000138 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Tom Herbert2df1a702011-11-28 16:33:37 +0000139 unsigned int pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000140
141#ifdef BNX2X_STOP_ON_ERROR
142 if (unlikely(bp->panic))
143 return -1;
144#endif
145
Ariel Elior6383c0b2011-07-14 08:31:57 +0000146 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
147 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
148 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000149
150 while (sw_cons != hw_cons) {
151 u16 pkt_cons;
152
153 pkt_cons = TX_BD(sw_cons);
154
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000155 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
156 " pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000157 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000158
Tom Herbert2df1a702011-11-28 16:33:37 +0000159 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
160 &pkts_compl, &bytes_compl);
161
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000162 sw_cons++;
163 }
164
Tom Herbert2df1a702011-11-28 16:33:37 +0000165 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
166
Ariel Elior6383c0b2011-07-14 08:31:57 +0000167 txdata->tx_pkt_cons = sw_cons;
168 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000169
170 /* Need to make the tx_bd_cons update visible to start_xmit()
171 * before checking for netif_tx_queue_stopped(). Without the
172 * memory barrier, there is a small possibility that
173 * start_xmit() will miss it and cause the queue to be stopped
174 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300175 * On the other hand we need an rmb() here to ensure the proper
176 * ordering of bit testing in the following
177 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000178 */
179 smp_mb();
180
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000181 if (unlikely(netif_tx_queue_stopped(txq))) {
182 /* Taking tx_lock() is needed to prevent reenabling the queue
183 * while it's empty. This could have happened if rx_action() gets
184 * suspended in bnx2x_tx_int() after the condition before
185 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
186 *
187 * stops the queue->sees fresh tx_bd_cons->releases the queue->
188 * sends some packets consuming the whole queue again->
189 * stops the queue
190 */
191
192 __netif_tx_lock(txq, smp_processor_id());
193
194 if ((netif_tx_queue_stopped(txq)) &&
195 (bp->state == BNX2X_STATE_OPEN) &&
Ariel Elior6383c0b2011-07-14 08:31:57 +0000196 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000197 netif_tx_wake_queue(txq);
198
199 __netif_tx_unlock(txq);
200 }
201 return 0;
202}
203
204static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
205 u16 idx)
206{
207 u16 last_max = fp->last_max_sge;
208
209 if (SUB_S16(idx, last_max) > 0)
210 fp->last_max_sge = idx;
211}
212
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000213static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
214 u16 sge_len,
215 struct eth_end_agg_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000216{
217 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000218 u16 last_max, last_elem, first_elem;
219 u16 delta = 0;
220 u16 i;
221
222 if (!sge_len)
223 return;
224
225 /* First mark all used pages */
226 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300227 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000228 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000229
230 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000231 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000232
233 /* Here we assume that the last SGE index is the biggest */
234 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000235 bnx2x_update_last_max_sge(fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000236 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000237
238 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300239 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
240 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000241
242 /* If ring is not full */
243 if (last_elem + 1 != first_elem)
244 last_elem++;
245
246 /* Now update the prod */
247 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
248 if (likely(fp->sge_mask[i]))
249 break;
250
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300251 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
252 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000253 }
254
255 if (delta > 0) {
256 fp->rx_sge_prod += delta;
257 /* clear page-end entries */
258 bnx2x_clear_sge_mask_next_elems(fp);
259 }
260
261 DP(NETIF_MSG_RX_STATUS,
262 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
263 fp->last_max_sge, fp->rx_sge_prod);
264}
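/* Editorial note (not part of the original source): fp->sge_mask is a bit
 * vector with BIT_VEC64_ELEM_SZ (64) SGEs per element; a cleared bit marks
 * an SGE whose page was just handed to an aggregation.  rx_sge_prod is
 * advanced only across mask elements that have gone completely to zero, a
 * whole element (64 SGEs) at a time, and every element skipped over is
 * re-armed to all ones so those ring slots can be reused.
 */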
265
Eric Dumazete52fcb22011-11-14 06:05:34 +0000266/* Set Toeplitz hash value in the skb using the value from the
267 * CQE (calculated by HW).
268 */
269static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
270 const struct eth_fast_path_rx_cqe *cqe)
271{
272 /* Set Toeplitz hash from CQE */
273 if ((bp->dev->features & NETIF_F_RXHASH) &&
274 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
275 return le32_to_cpu(cqe->rss_hash_result);
276 return 0;
277}
278
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000279static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000280 u16 cons, u16 prod,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300281 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000282{
283 struct bnx2x *bp = fp->bp;
284 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
285 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
286 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
287 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300288 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
289 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000290
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300291 /* print error if current state != stop */
292 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000293 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
294
Eric Dumazete52fcb22011-11-14 06:05:34 +0000295 /* Try to map an empty data buffer from the aggregation info */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300296 mapping = dma_map_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000297 first_buf->data + NET_SKB_PAD,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300298 fp->rx_buf_size, DMA_FROM_DEVICE);
299 /*
300 * ...if it fails - move the skb from the consumer to the producer
301 * and set the current aggregation state as ERROR to drop it
302 * when TPA_STOP arrives.
303 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000304
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300305 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
306 /* Move the BD from the consumer to the producer */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000307 bnx2x_reuse_rx_data(fp, cons, prod);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300308 tpa_info->tpa_state = BNX2X_TPA_ERROR;
309 return;
310 }
311
Eric Dumazete52fcb22011-11-14 06:05:34 +0000312 /* move empty data from pool to prod */
313 prod_rx_buf->data = first_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300314 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000315 /* point prod_bd to new data */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000316 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
317 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
318
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300319 /* move partial skb from cons to pool (don't unmap yet) */
320 *first_buf = *cons_rx_buf;
321
322 /* mark bin state as START */
323 tpa_info->parsing_flags =
324 le16_to_cpu(cqe->pars_flags.flags);
325 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
326 tpa_info->tpa_state = BNX2X_TPA_START;
327 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
328 tpa_info->placement_offset = cqe->placement_offset;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000329 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000330 if (fp->mode == TPA_MODE_GRO) {
331 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
332 tpa_info->full_page =
333 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
Dmitry Kravkovfe603b42012-02-20 09:59:11 +0000334 /*
335 * FW 7.2.16 BUG workaround:
336 * if the SGE size is an exact multiple of gro_size,
337 * FW will place one less frag on the SGE.
338 * The calculation is done only for potentially
339 * dangerous MTUs.
340 */
341 if (unlikely(bp->gro_check))
342 if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
343 tpa_info->full_page -= gro_size;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000344 tpa_info->gro_size = gro_size;
345 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300346
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000347#ifdef BNX2X_STOP_ON_ERROR
348 fp->tpa_queue_used |= (1 << queue);
349#ifdef _ASM_GENERIC_INT_L64_H
350 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
351#else
352 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
353#endif
354 fp->tpa_queue_used);
355#endif
356}
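/* Worked example (editorial addition), assuming 4 KiB pages and
 * PAGES_PER_SGE == 1 so that SGE_PAGE_SIZE * PAGES_PER_SGE == 4096:
 *
 *   gro_size == 1448:  full_page = 4096 / 1448 * 1448 = 2896
 *                      (4096 % 1448 != 0, so the FW workaround is not needed)
 *   gro_size == 1024:  full_page = 4096 / 1024 * 1024 = 4096, and since
 *                      4096 % 1024 == 0, with bp->gro_check set the
 *                      workaround trims it to 4096 - 1024 = 3072
 */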
357
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000358/* Timestamp option length allowed for TPA aggregation:
359 *
360 * nop nop kind length echo val
361 */
362#define TPA_TSTAMP_OPT_LEN 12
363/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000364 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000365 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000366 * @bp: driver handle
367 * @parsing_flags: parsing flags from the START CQE
368 * @len_on_bd: total length of the first packet for the
369 * aggregation.
370 *
371 * Approximate value of the MSS for this aggregation calculated using
372 * the first packet of it.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000373 */
374static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
375 u16 len_on_bd)
376{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300377 /*
378 * TPA aggregation won't have either IP options or TCP options
379 * other than timestamp or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000380 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300381 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
382
383 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
384 PRS_FLAG_OVERETH_IPV6)
385 hdrs_len += sizeof(struct ipv6hdr);
386 else /* IPv4 */
387 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000388
389
390 /* Check if there was a TCP timestamp; if there is, it will
391 * always be 12 bytes long: nop nop kind length echo val.
392 *
393 * Otherwise FW would close the aggregation.
394 */
395 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
396 hdrs_len += TPA_TSTAMP_OPT_LEN;
397
398 return len_on_bd - hdrs_len;
399}
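/* Worked example (editor's addition, using standard header sizes only):
 * for an IPv4 aggregation whose first packet has len_on_bd == 1514,
 *
 *   hdrs_len = ETH_HLEN (14) + sizeof(struct tcphdr) (20)
 *            + sizeof(struct iphdr) (20)                  = 54
 *   with TCP timestamps: hdrs_len += TPA_TSTAMP_OPT_LEN (12) -> 66
 *
 * giving an approximate MSS of 1514 - 54 = 1460, or 1514 - 66 = 1448 when
 * the timestamp option is present.
 */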
400
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000401static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000402 struct bnx2x_agg_info *tpa_info,
403 u16 pages,
404 struct sk_buff *skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300405 struct eth_end_agg_rx_cqe *cqe,
406 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000407{
408 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000409 u32 i, frag_len, frag_size;
410 int err, j, frag_id = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300411 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000412 u16 full_page = 0, gro_size = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000413
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300414 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000415
416 if (fp->mode == TPA_MODE_GRO) {
417 gro_size = tpa_info->gro_size;
418 full_page = tpa_info->full_page;
419 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000420
421 /* This is needed in order to enable forwarding support */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000422 if (frag_size) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300423 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
424 tpa_info->parsing_flags, len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000425
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000426 /* set for GRO */
427 if (fp->mode == TPA_MODE_GRO)
428 skb_shinfo(skb)->gso_type =
429 (GET_FLAG(tpa_info->parsing_flags,
430 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
431 PRS_FLAG_OVERETH_IPV6) ?
432 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
433 }
434
435
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000436#ifdef BNX2X_STOP_ON_ERROR
437 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
438 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
439 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300440 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000441 bnx2x_panic();
442 return -EINVAL;
443 }
444#endif
445
446 /* Run through the SGL and compose the fragmented skb */
447 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300448 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000449
450 /* FW gives the indices of the SGE as if the ring is an array
451 (meaning that "next" element will consume 2 indices) */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000452 if (fp->mode == TPA_MODE_GRO)
453 frag_len = min_t(u32, frag_size, (u32)full_page);
454 else /* LRO */
455 frag_len = min_t(u32, frag_size,
456 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
457
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000458 rx_pg = &fp->rx_page_ring[sge_idx];
459 old_rx_pg = *rx_pg;
460
461 /* If we fail to allocate a substitute page, we simply stop
462 where we are and drop the whole packet */
463 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
464 if (unlikely(err)) {
465 fp->eth_q_stats.rx_skb_alloc_failed++;
466 return err;
467 }
468
469 /* Unmap the page as we are going to pass it to the stack */
470 dma_unmap_page(&bp->pdev->dev,
471 dma_unmap_addr(&old_rx_pg, mapping),
472 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000473 /* Add one frag and update the appropriate fields in the skb */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000474 if (fp->mode == TPA_MODE_LRO)
475 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
476 else { /* GRO */
477 int rem;
478 int offset = 0;
479 for (rem = frag_len; rem > 0; rem -= gro_size) {
480 int len = rem > gro_size ? gro_size : rem;
481 skb_fill_page_desc(skb, frag_id++,
482 old_rx_pg.page, offset, len);
483 if (offset)
484 get_page(old_rx_pg.page);
485 offset += len;
486 }
487 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000488
489 skb->data_len += frag_len;
Eric Dumazete1ac50f2011-10-19 23:00:23 +0000490 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000491 skb->len += frag_len;
492
493 frag_size -= frag_len;
494 }
495
496 return 0;
497}
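/* Illustrative note (editorial sketch): in GRO mode a single SGE page may be
 * split into several skb fragments of gro_size bytes each.  For instance,
 * with frag_len == 2896 and gro_size == 1448 the loop above adds two frags
 * (offsets 0 and 1448) that both point at old_rx_pg.page, taking an extra
 * page reference via get_page() for every fragment after the first so the
 * page is released only when all fragments have been freed.
 */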
498
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000499static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
500 struct bnx2x_agg_info *tpa_info,
501 u16 pages,
502 struct eth_end_agg_rx_cqe *cqe,
503 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000504{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300505 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000506 u8 pad = tpa_info->placement_offset;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300507 u16 len = tpa_info->len_on_bd;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000508 struct sk_buff *skb = NULL;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000509 u8 *new_data, *data = rx_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300510 u8 old_tpa_state = tpa_info->tpa_state;
511
512 tpa_info->tpa_state = BNX2X_TPA_STOP;
513
514 /* If there was an error during the handling of the TPA_START -
515 * drop this aggregation.
516 */
517 if (old_tpa_state == BNX2X_TPA_ERROR)
518 goto drop;
519
Eric Dumazete52fcb22011-11-14 06:05:34 +0000520 /* Try to allocate the new data */
521 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000522
523 /* Unmap skb in the pool anyway, as we are going to change
524 pool entry status to BNX2X_TPA_STOP even if new skb allocation
525 fails. */
526 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800527 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000528 if (likely(new_data))
529 skb = build_skb(data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000530
Eric Dumazete52fcb22011-11-14 06:05:34 +0000531 if (likely(skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000532#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800533 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000534 BNX2X_ERR("skb_put is about to fail... "
535 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800536 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000537 bnx2x_panic();
538 return;
539 }
540#endif
541
Eric Dumazete52fcb22011-11-14 06:05:34 +0000542 skb_reserve(skb, pad + NET_SKB_PAD);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000543 skb_put(skb, len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000544 skb->rxhash = tpa_info->rxhash;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000545
546 skb->protocol = eth_type_trans(skb, bp->dev);
547 skb->ip_summed = CHECKSUM_UNNECESSARY;
548
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000549 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
550 skb, cqe, cqe_idx)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300551 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
552 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
Hao Zheng9bcc0892010-10-20 13:56:11 +0000553 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000554 } else {
555 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
556 " - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000557 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000558 }
559
560
Eric Dumazete52fcb22011-11-14 06:05:34 +0000561 /* put new data in bin */
562 rx_buf->data = new_data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000563
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300564 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000565 }
Jesper Juhl3f61cd82012-02-06 11:28:21 +0000566 kfree(new_data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300567drop:
568 /* drop the packet and keep the buffer in the bin */
569 DP(NETIF_MSG_RX_STATUS,
570 "Failed to allocate or map a new skb - dropping packet!\n");
571 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000572}
573
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000574
575int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
576{
577 struct bnx2x *bp = fp->bp;
578 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
579 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
580 int rx_pkt = 0;
581
582#ifdef BNX2X_STOP_ON_ERROR
583 if (unlikely(bp->panic))
584 return 0;
585#endif
586
587 /* CQ "next element" is of the size of the regular element,
588 that's why it's ok here */
589 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
590 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
591 hw_comp_cons++;
592
593 bd_cons = fp->rx_bd_cons;
594 bd_prod = fp->rx_bd_prod;
595 bd_prod_fw = bd_prod;
596 sw_comp_cons = fp->rx_comp_cons;
597 sw_comp_prod = fp->rx_comp_prod;
598
599 /* Memory barrier necessary as speculative reads of the rx
600 * buffer can be ahead of the index in the status block
601 */
602 rmb();
603
604 DP(NETIF_MSG_RX_STATUS,
605 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
606 fp->index, hw_comp_cons, sw_comp_cons);
607
608 while (sw_comp_cons != hw_comp_cons) {
609 struct sw_rx_bd *rx_buf = NULL;
610 struct sk_buff *skb;
611 union eth_rx_cqe *cqe;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300612 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000613 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300614 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000615 u16 len, pad, queue;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000616 u8 *data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000617
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300618#ifdef BNX2X_STOP_ON_ERROR
619 if (unlikely(bp->panic))
620 return 0;
621#endif
622
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000623 comp_ring_cons = RCQ_BD(sw_comp_cons);
624 bd_prod = RX_BD(bd_prod);
625 bd_cons = RX_BD(bd_cons);
626
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000627 cqe = &fp->rx_comp_ring[comp_ring_cons];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300628 cqe_fp = &cqe->fast_path_cqe;
629 cqe_fp_flags = cqe_fp->type_error_flags;
630 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000631
632 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
633 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300634 cqe_fp_flags, cqe_fp->status_flags,
635 le32_to_cpu(cqe_fp->rss_hash_result),
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000636 le16_to_cpu(cqe_fp->vlan_tag),
637 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000638
639 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300640 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000641 bnx2x_sp_event(fp, cqe);
642 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000643 }
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000644
Eric Dumazete52fcb22011-11-14 06:05:34 +0000645 rx_buf = &fp->rx_buf_ring[bd_cons];
646 data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000647
Eric Dumazete52fcb22011-11-14 06:05:34 +0000648 if (!CQE_TYPE_FAST(cqe_fp_type)) {
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000649 struct bnx2x_agg_info *tpa_info;
650 u16 frag_size, pages;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300651#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000652 /* sanity check */
653 if (fp->disable_tpa &&
654 (CQE_TYPE_START(cqe_fp_type) ||
655 CQE_TYPE_STOP(cqe_fp_type)))
656 BNX2X_ERR("START/STOP packet while "
657 "disable_tpa type %x\n",
658 CQE_TYPE(cqe_fp_type));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300659#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000660
Eric Dumazete52fcb22011-11-14 06:05:34 +0000661 if (CQE_TYPE_START(cqe_fp_type)) {
662 u16 queue = cqe_fp->queue_index;
663 DP(NETIF_MSG_RX_STATUS,
664 "calling tpa_start on queue %d\n",
665 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000666
Eric Dumazete52fcb22011-11-14 06:05:34 +0000667 bnx2x_tpa_start(fp, queue,
668 bd_cons, bd_prod,
669 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000670
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000671 goto next_rx;
672
673 }
674 queue = cqe->end_agg_cqe.queue_index;
675 tpa_info = &fp->tpa_info[queue];
676 DP(NETIF_MSG_RX_STATUS,
677 "calling tpa_stop on queue %d\n",
678 queue);
679
680 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
681 tpa_info->len_on_bd;
682
683 if (fp->mode == TPA_MODE_GRO)
684 pages = (frag_size + tpa_info->full_page - 1) /
685 tpa_info->full_page;
686 else
687 pages = SGE_PAGE_ALIGN(frag_size) >>
688 SGE_PAGE_SHIFT;
689
690 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
691 &cqe->end_agg_cqe, comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000692#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000693 if (bp->panic)
694 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000695#endif
696
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000697 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
698 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000699 }
700 /* non TPA */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +0000701 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000702 pad = cqe_fp->placement_offset;
703 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000704 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +0000705 pad + RX_COPY_THRESH,
706 DMA_FROM_DEVICE);
707 pad += NET_SKB_PAD;
708 prefetch(data + pad); /* speedup eth_type_trans() */
709 /* is this an error packet? */
710 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
711 DP(NETIF_MSG_RX_ERR,
712 "ERROR flags %x rx packet %u\n",
713 cqe_fp_flags, sw_comp_cons);
714 fp->eth_q_stats.rx_err_discard_pkt++;
715 goto reuse_rx;
716 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000717
Eric Dumazete52fcb22011-11-14 06:05:34 +0000718 /* Since we don't have a jumbo ring,
719 * copy small packets if mtu > 1500
720 */
721 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
722 (len <= RX_COPY_THRESH)) {
723 skb = netdev_alloc_skb_ip_align(bp->dev, len);
724 if (skb == NULL) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000725 DP(NETIF_MSG_RX_ERR,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000726 "ERROR packet dropped because of alloc failure\n");
727 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000728 goto reuse_rx;
729 }
Eric Dumazete52fcb22011-11-14 06:05:34 +0000730 memcpy(skb->data, data + pad, len);
731 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
732 } else {
733 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000734 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000735 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800736 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000737 DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000738 skb = build_skb(data);
739 if (unlikely(!skb)) {
740 kfree(data);
741 fp->eth_q_stats.rx_skb_alloc_failed++;
742 goto next_rx;
743 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000744 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000745 } else {
746 DP(NETIF_MSG_RX_ERR,
747 "ERROR packet dropped because "
748 "of alloc failure\n");
749 fp->eth_q_stats.rx_skb_alloc_failed++;
750reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000751 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000752 goto next_rx;
753 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000754 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000755
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000756 skb_put(skb, len);
757 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000758
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000759 /* Set Toeplitz hash for a non-LRO skb */
760 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000761
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000762 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000763
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000764 if (bp->dev->features & NETIF_F_RXCSUM) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300765
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000766 if (likely(BNX2X_RX_CSUM_OK(cqe)))
767 skb->ip_summed = CHECKSUM_UNNECESSARY;
768 else
769 fp->eth_q_stats.hw_csum_err++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000770 }
771
Dmitry Kravkovf233caf2011-11-13 04:34:22 +0000772 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000773
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300774 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
775 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000776 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300777 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000778 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000779
780
781next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000782 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000783
784 bd_cons = NEXT_RX_IDX(bd_cons);
785 bd_prod = NEXT_RX_IDX(bd_prod);
786 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
787 rx_pkt++;
788next_cqe:
789 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
790 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
791
792 if (rx_pkt == budget)
793 break;
794 } /* while */
795
796 fp->rx_bd_cons = bd_cons;
797 fp->rx_bd_prod = bd_prod_fw;
798 fp->rx_comp_cons = sw_comp_cons;
799 fp->rx_comp_prod = sw_comp_prod;
800
801 /* Update producers */
802 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
803 fp->rx_sge_prod);
804
805 fp->rx_pkt += rx_pkt;
806 fp->rx_calls++;
807
808 return rx_pkt;
809}
810
811static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
812{
813 struct bnx2x_fastpath *fp = fp_cookie;
814 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000815 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000816
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000817 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
818 "[fp %d fw_sd %d igusb %d]\n",
819 fp->index, fp->fw_sb_id, fp->igu_sb_id);
820 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000821
822#ifdef BNX2X_STOP_ON_ERROR
823 if (unlikely(bp->panic))
824 return IRQ_HANDLED;
825#endif
826
827 /* Handle Rx and Tx according to MSI-X vector */
828 prefetch(fp->rx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000829
830 for_each_cos_in_tx_queue(fp, cos)
831 prefetch(fp->txdata[cos].tx_cons_sb);
832
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000833 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000834 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
835
836 return IRQ_HANDLED;
837}
838
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000839/* HW Lock for shared dual port PHYs */
840void bnx2x_acquire_phy_lock(struct bnx2x *bp)
841{
842 mutex_lock(&bp->port.phy_mutex);
843
844 if (bp->port.need_hw_lock)
845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
846}
847
848void bnx2x_release_phy_lock(struct bnx2x *bp)
849{
850 if (bp->port.need_hw_lock)
851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
852
853 mutex_unlock(&bp->port.phy_mutex);
854}
855
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800856/* calculates MF speed according to current linespeed and MF configuration */
857u16 bnx2x_get_mf_speed(struct bnx2x *bp)
858{
859 u16 line_speed = bp->link_vars.line_speed;
860 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000861 u16 maxCfg = bnx2x_extract_max_cfg(bp,
862 bp->mf_config[BP_VN(bp)]);
863
864 /* Calculate the current MAX line speed limit for the MF
865 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800866 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000867 if (IS_MF_SI(bp))
868 line_speed = (line_speed * maxCfg) / 100;
869 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800870 u16 vn_max_rate = maxCfg * 100;
871
872 if (vn_max_rate < line_speed)
873 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000874 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800875 }
876
877 return line_speed;
878}
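/* Worked example (editorial; the units follow from the code above rather
 * than from any external documentation): with a 10000 Mbps link and
 * maxCfg == 50,
 *
 *   IS_MF_SI:  line_speed = 10000 * 50 / 100 = 5000 Mbps
 *   SD mode:   vn_max_rate = 50 * 100 = 5000 Mbps, which is below 10000,
 *              so line_speed is likewise clamped to 5000 Mbps.
 */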
879
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000880/**
881 * bnx2x_fill_report_data - fill link report data to report
882 *
883 * @bp: driver handle
884 * @data: link state to update
885 *
886 * It uses non-atomic bit operations because it is called under the mutex.
887 */
888static inline void bnx2x_fill_report_data(struct bnx2x *bp,
889 struct bnx2x_link_report_data *data)
890{
891 u16 line_speed = bnx2x_get_mf_speed(bp);
892
893 memset(data, 0, sizeof(*data));
894
895 /* Fill the report data: effective line speed */
896 data->line_speed = line_speed;
897
898 /* Link is down */
899 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
900 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
901 &data->link_report_flags);
902
903 /* Full DUPLEX */
904 if (bp->link_vars.duplex == DUPLEX_FULL)
905 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
906
907 /* Rx Flow Control is ON */
908 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
909 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
910
911 /* Tx Flow Control is ON */
912 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
913 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
914}
915
916/**
917 * bnx2x_link_report - report link status to OS.
918 *
919 * @bp: driver handle
920 *
921 * Calls the __bnx2x_link_report() under the same locking scheme
922 * as a link/PHY state managing code to ensure a consistent link
923 * reporting.
924 */
925
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000926void bnx2x_link_report(struct bnx2x *bp)
927{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000928 bnx2x_acquire_phy_lock(bp);
929 __bnx2x_link_report(bp);
930 bnx2x_release_phy_lock(bp);
931}
932
933/**
934 * __bnx2x_link_report - report link status to OS.
935 *
936 * @bp: driver handle
937 *
938 * Non-atomic implementation.
939 * Should be called under the phy_lock.
940 */
941void __bnx2x_link_report(struct bnx2x *bp)
942{
943 struct bnx2x_link_report_data cur_data;
944
945 /* reread mf_cfg */
946 if (!CHIP_IS_E1(bp))
947 bnx2x_read_mf_cfg(bp);
948
949 /* Read the current link report info */
950 bnx2x_fill_report_data(bp, &cur_data);
951
952 /* Don't report link down or exactly the same link status twice */
953 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
954 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
955 &bp->last_reported_link.link_report_flags) &&
956 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
957 &cur_data.link_report_flags)))
958 return;
959
960 bp->link_cnt++;
961
962 /* We are going to report new link parameters now -
963 * remember the current data for the next time.
964 */
965 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
966
967 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
968 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000969 netif_carrier_off(bp->dev);
970 netdev_err(bp->dev, "NIC Link is Down\n");
971 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000972 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000973 const char *duplex;
974 const char *flow;
975
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000976 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000977
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000978 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
979 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000980 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000981 else
Joe Perches94f05b02011-08-14 12:16:20 +0000982 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000983
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000984 /* Handle the FC at the end so that only these flags would be
985 * possibly set. This way we may easily check if there is no FC
986 * enabled.
987 */
988 if (cur_data.link_report_flags) {
989 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
990 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000991 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
992 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000993 flow = "ON - receive & transmit";
994 else
995 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000996 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000997 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000998 }
Joe Perches94f05b02011-08-14 12:16:20 +0000999 } else {
1000 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001001 }
Joe Perches94f05b02011-08-14 12:16:20 +00001002 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1003 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001004 }
1005}
1006
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001007void bnx2x_init_rx_rings(struct bnx2x *bp)
1008{
1009 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001010 u16 ring_prod;
1011 int i, j;
1012
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001013 /* Allocate TPA resources */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001014 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001015 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001016
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001017 DP(NETIF_MSG_IFUP,
1018 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1019
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001020 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001021 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001022 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001023 struct bnx2x_agg_info *tpa_info =
1024 &fp->tpa_info[i];
1025 struct sw_rx_bd *first_buf =
1026 &tpa_info->first_buf;
1027
Eric Dumazete52fcb22011-11-14 06:05:34 +00001028 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1029 GFP_ATOMIC);
1030 if (!first_buf->data) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001031 BNX2X_ERR("Failed to allocate TPA "
1032 "skb pool for queue[%d] - "
1033 "disabling TPA on this "
1034 "queue!\n", j);
1035 bnx2x_free_tpa_pool(bp, fp, i);
1036 fp->disable_tpa = 1;
1037 break;
1038 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001039 dma_unmap_addr_set(first_buf, mapping, 0);
1040 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001041 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001042
1043 /* "next page" elements initialization */
1044 bnx2x_set_next_page_sgl(fp);
1045
1046 /* set SGEs bit mask */
1047 bnx2x_init_sge_ring_bit_mask(fp);
1048
1049 /* Allocate SGEs and initialize the ring elements */
1050 for (i = 0, ring_prod = 0;
1051 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1052
1053 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1054 BNX2X_ERR("was only able to allocate "
1055 "%d rx sges\n", i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001056 BNX2X_ERR("disabling TPA for "
1057 "queue[%d]\n", j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001058 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001059 bnx2x_free_rx_sge_range(bp, fp,
1060 ring_prod);
1061 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001062 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001063 fp->disable_tpa = 1;
1064 ring_prod = 0;
1065 break;
1066 }
1067 ring_prod = NEXT_SGE_IDX(ring_prod);
1068 }
1069
1070 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001071 }
1072 }
1073
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001074 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001075 struct bnx2x_fastpath *fp = &bp->fp[j];
1076
1077 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001078
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001079 /* Activate BD ring */
1080 /* Warning!
1081 * this will generate an interrupt (to the TSTORM);
1082 * it must only be done after the chip is initialized
1083 */
1084 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1085 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001086
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001087 if (j != 0)
1088 continue;
1089
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001090 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001091 REG_WR(bp, BAR_USTRORM_INTMEM +
1092 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1093 U64_LO(fp->rx_comp_mapping));
1094 REG_WR(bp, BAR_USTRORM_INTMEM +
1095 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1096 U64_HI(fp->rx_comp_mapping));
1097 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001098 }
1099}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001100
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001101static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1102{
1103 int i;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001104 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001105
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001106 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001107 struct bnx2x_fastpath *fp = &bp->fp[i];
Ariel Elior6383c0b2011-07-14 08:31:57 +00001108 for_each_cos_in_tx_queue(fp, cos) {
1109 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
Tom Herbert2df1a702011-11-28 16:33:37 +00001110 unsigned pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001111
Ariel Elior6383c0b2011-07-14 08:31:57 +00001112 u16 sw_prod = txdata->tx_pkt_prod;
1113 u16 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001114
Ariel Elior6383c0b2011-07-14 08:31:57 +00001115 while (sw_cons != sw_prod) {
Tom Herbert2df1a702011-11-28 16:33:37 +00001116 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1117 &pkts_compl, &bytes_compl);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001118 sw_cons++;
1119 }
Tom Herbert2df1a702011-11-28 16:33:37 +00001120 netdev_tx_reset_queue(
1121 netdev_get_tx_queue(bp->dev, txdata->txq_index));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001122 }
1123 }
1124}
1125
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001126static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1127{
1128 struct bnx2x *bp = fp->bp;
1129 int i;
1130
1131 /* ring wasn't allocated */
1132 if (fp->rx_buf_ring == NULL)
1133 return;
1134
1135 for (i = 0; i < NUM_RX_BD; i++) {
1136 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001137 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001138
Eric Dumazete52fcb22011-11-14 06:05:34 +00001139 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001140 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001141 dma_unmap_single(&bp->pdev->dev,
1142 dma_unmap_addr(rx_buf, mapping),
1143 fp->rx_buf_size, DMA_FROM_DEVICE);
1144
Eric Dumazete52fcb22011-11-14 06:05:34 +00001145 rx_buf->data = NULL;
1146 kfree(data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001147 }
1148}
1149
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001150static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1151{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001152 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001153
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001154 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001155 struct bnx2x_fastpath *fp = &bp->fp[j];
1156
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001157 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001158
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001159 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001160 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001161 }
1162}
1163
1164void bnx2x_free_skbs(struct bnx2x *bp)
1165{
1166 bnx2x_free_tx_skbs(bp);
1167 bnx2x_free_rx_skbs(bp);
1168}
1169
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001170void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1171{
1172 /* load old values */
1173 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1174
1175 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1176 /* leave all but MAX value */
1177 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1178
1179 /* set new MAX value */
1180 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1181 & FUNC_MF_CFG_MAX_BW_MASK;
1182
1183 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1184 }
1185}
1186
Dmitry Kravkovca924292011-06-14 01:33:08 +00001187/**
1188 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1189 *
1190 * @bp: driver handle
1191 * @nvecs: number of vectors to be released
1192 */
1193static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001194{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001195 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001196
Dmitry Kravkovca924292011-06-14 01:33:08 +00001197 if (nvecs == offset)
1198 return;
1199 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001200 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001201 bp->msix_table[offset].vector);
1202 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001203#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001204 if (nvecs == offset)
1205 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001206 offset++;
1207#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001208
Dmitry Kravkovca924292011-06-14 01:33:08 +00001209 for_each_eth_queue(bp, i) {
1210 if (nvecs == offset)
1211 return;
1212 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1213 "irq\n", i, bp->msix_table[offset].vector);
1214
1215 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001216 }
1217}
1218
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001219void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001220{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001221 if (bp->flags & USING_MSIX_FLAG)
Dmitry Kravkovca924292011-06-14 01:33:08 +00001222 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001223 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001224 else if (bp->flags & USING_MSI_FLAG)
1225 free_irq(bp->pdev->irq, bp->dev);
1226 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001227 free_irq(bp->pdev->irq, bp->dev);
1228}
1229
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001230int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001231{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001232 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001233
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001234 bp->msix_table[msix_vec].entry = msix_vec;
1235 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1236 bp->msix_table[0].entry);
1237 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001238
1239#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001240 bp->msix_table[msix_vec].entry = msix_vec;
1241 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1242 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1243 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001244#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001245 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001246 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001247 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001248 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001249 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1250 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001251 }
1252
Ariel Elior6383c0b2011-07-14 08:31:57 +00001253 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001254
1255 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001256
1257 /*
1258 * reconfigure number of tx/rx queues according to available
1259 * MSI-X vectors
1260 */
1261 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001262 /* how many fewer vectors will we have? */
1263 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001264
1265 DP(NETIF_MSG_IFUP,
1266 "Trying to use less MSI-X vectors: %d\n", rc);
1267
1268 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1269
1270 if (rc) {
1271 DP(NETIF_MSG_IFUP,
1272 "MSI-X is not attainable rc %d\n", rc);
1273 return rc;
1274 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001275 /*
1276 * decrease number of queues by number of unallocated entries
1277 */
1278 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001279
1280 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1281 bp->num_queues);
1282 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001283 /* fall back to INTx if not enough memory */
1284 if (rc == -ENOMEM)
1285 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001286 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1287 return rc;
1288 }
1289
1290 bp->flags |= USING_MSIX_FLAG;
1291
1292 return 0;
1293}
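/* Worked example (editorial, purely for illustration): with 8 ETH queues,
 * CNIC present and one slowpath vector, req_cnt = 8 + 1 + 1 = 10.  If
 * pci_enable_msix() reports that only 6 vectors are available (rc == 6,
 * which is >= BNX2X_MIN_MSIX_VEC_CNT), the retry path above re-requests 6
 * and shrinks bp->num_queues by diff = 10 - 6 = 4 so the remaining queues
 * fit the vectors that were actually granted.
 */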
1294
1295static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1296{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001297 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001298
Dmitry Kravkovca924292011-06-14 01:33:08 +00001299 rc = request_irq(bp->msix_table[offset++].vector,
1300 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001301 bp->dev->name, bp->dev);
1302 if (rc) {
1303 BNX2X_ERR("request sp irq failed\n");
1304 return -EBUSY;
1305 }
1306
1307#ifdef BCM_CNIC
1308 offset++;
1309#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001310 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001311 struct bnx2x_fastpath *fp = &bp->fp[i];
1312 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1313 bp->dev->name, i);
1314
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001315 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001316 bnx2x_msix_fp_int, 0, fp->name, fp);
1317 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001318 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1319 bp->msix_table[offset].vector, rc);
1320 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001321 return -EBUSY;
1322 }
1323
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001324 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001325 }
1326
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001327 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001328 offset = 1 + CNIC_PRESENT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001329 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1330 " ... fp[%d] %d\n",
1331 bp->msix_table[0].vector,
1332 0, bp->msix_table[offset].vector,
1333 i - 1, bp->msix_table[offset + i - 1].vector);
1334
1335 return 0;
1336}
1337
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001338int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001339{
1340 int rc;
1341
1342 rc = pci_enable_msi(bp->pdev);
1343 if (rc) {
1344 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1345 return -1;
1346 }
1347 bp->flags |= USING_MSI_FLAG;
1348
1349 return 0;
1350}
1351
1352static int bnx2x_req_irq(struct bnx2x *bp)
1353{
1354 unsigned long flags;
1355 int rc;
1356
1357 if (bp->flags & USING_MSI_FLAG)
1358 flags = 0;
1359 else
1360 flags = IRQF_SHARED;
1361
1362 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1363 bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001364 return rc;
1365}
1366
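/*
 * Attach interrupt handlers using whichever mechanism was negotiated
 * earlier: one handler per vector for MSI-X, otherwise a single handler
 * (shared for legacy INTx, exclusive for MSI) on the PCI device IRQ.
 */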
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001367static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1368{
1369 int rc = 0;
1370 if (bp->flags & USING_MSIX_FLAG) {
1371 rc = bnx2x_req_msix_irqs(bp);
1372 if (rc)
1373 return rc;
1374 } else {
1375 bnx2x_ack_int(bp);
1376 rc = bnx2x_req_irq(bp);
1377 if (rc) {
1378 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1379 return rc;
1380 }
1381 if (bp->flags & USING_MSI_FLAG) {
1382 bp->dev->irq = bp->pdev->irq;
1383 netdev_info(bp->dev, "using MSI IRQ %d\n",
1384 bp->pdev->irq);
1385 }
1386 }
1387
1388 return 0;
1389}
1390
1391static inline void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001392{
1393 int i;
1394
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001395 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001396 napi_enable(&bnx2x_fp(bp, i, napi));
1397}
1398
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001399static inline void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001400{
1401 int i;
1402
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001403 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001404 napi_disable(&bnx2x_fp(bp, i, napi));
1405}
1406
1407void bnx2x_netif_start(struct bnx2x *bp)
1408{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001409 if (netif_running(bp->dev)) {
1410 bnx2x_napi_enable(bp);
1411 bnx2x_int_enable(bp);
1412 if (bp->state == BNX2X_STATE_OPEN)
1413 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001414 }
1415}
1416
1417void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1418{
1419 bnx2x_int_disable_sync(bp, disable_hw);
1420 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001421}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001422
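/*
 * Used as the driver's select_queue callback: steer FCoE/FIP frames
 * (looking past an optional VLAN tag) to the dedicated FCoE Tx ring and
 * hash everything else over the regular ETH Tx queues.
 */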
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001423u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1424{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001425 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001426
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001427#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001428 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001429 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1430 u16 ether_type = ntohs(hdr->h_proto);
1431
1432 /* Skip VLAN tag if present */
1433 if (ether_type == ETH_P_8021Q) {
1434 struct vlan_ethhdr *vhdr =
1435 (struct vlan_ethhdr *)skb->data;
1436
1437 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1438 }
1439
1440 /* If ethertype is FCoE or FIP - use FCoE ring */
1441 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001442 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001443 }
1444#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001445 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001446 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001447}
1448
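/*
 * Derive the number of RSS queues from the configured multi_mode and then
 * add the special (non-ETH) contexts such as the FCoE L2 queue.
 */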
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001449void bnx2x_set_num_queues(struct bnx2x *bp)
1450{
1451 switch (bp->multi_mode) {
1452 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001453 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001454 break;
1455 case ETH_RSS_MODE_REGULAR:
1456 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001457 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001458
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001459 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001460 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001461 break;
1462 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001463
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001464#ifdef BCM_CNIC
1465	/* override in iSCSI SD mode */
1466 if (IS_MF_ISCSI_SD(bp))
1467 bp->num_queues = 1;
1468#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001469 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001470 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001471}
1472
David S. Miller823dcd22011-08-20 10:39:12 -07001473/**
1474 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1475 *
1476 * @bp: Driver handle
1477 *
1478 * We currently support at most 16 Tx queues for each CoS, thus we will
1479 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1480 * bp->max_cos.
1481 *
1482 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1483 * index after all ETH L2 indices.
1484 *
1485 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1486 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1487 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1488 *
1489 * The proper configuration of skb->queue_mapping is handled by
1490 * bnx2x_select_queue() and __skb_tx_hash().
1491 *
1492 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1493 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1494 */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001495static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1496{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001497 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001498
Ariel Elior6383c0b2011-07-14 08:31:57 +00001499 tx = MAX_TXQS_PER_COS * bp->max_cos;
1500 rx = BNX2X_NUM_ETH_QUEUES(bp);
1501
1502/* account for the FCoE queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001503#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001504 if (!NO_FCOE(bp)) {
1505 rx += FCOE_PRESENT;
1506 tx += FCOE_PRESENT;
1507 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001508#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001509
1510 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1511 if (rc) {
1512 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1513 return rc;
1514 }
1515 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1516 if (rc) {
1517 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1518 return rc;
1519 }
1520
1521 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1522 tx, rx);
1523
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001524 return rc;
1525}
1526
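/*
 * Per-queue Rx buffer budget: firmware alignment at both ends plus the
 * IP header alignment padding, the Ethernet overhead and the MTU (a
 * fixed mini-jumbo MTU is used for the FCoE ring).
 */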
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001527static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1528{
1529 int i;
1530
1531 for_each_queue(bp, i) {
1532 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001533 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001534
1535 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1536 if (IS_FCOE_IDX(i))
1537 /*
1538			 * Although there are no IP frames expected to arrive on
1539			 * this ring, we still want to add an
1540 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1541 * overrun attack.
1542 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001543 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001544 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001545 mtu = bp->dev->mtu;
1546 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1547 IP_HEADER_ALIGNMENT_PADDING +
1548 ETH_OVREHEAD +
1549 mtu +
1550 BNX2X_FW_RX_ALIGN_END;
1551		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001552 }
1553}
1554
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001555static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1556{
1557 int i;
1558 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1559 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1560
1561 /*
1562	 * Prepare the initial contents of the indirection table if RSS is
1563 * enabled
1564 */
1565 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1566 for (i = 0; i < sizeof(ind_table); i++)
1567 ind_table[i] =
Ben Hutchings278bc422011-12-15 13:56:49 +00001568 bp->fp->cl_id +
1569 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001570 }
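	/*
	 * With the default ethtool helper each table entry i effectively maps
	 * to ETH queue (i % num_eth_queues), offset by the leading client id,
	 * so traffic is spread round-robin until ethtool overrides the table.
	 */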
1571
1572 /*
1573 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1574	 * per-port, so if explicit configuration is needed, do it only
1575 * for a PMF.
1576 *
1577 * For 57712 and newer on the other hand it's a per-function
1578 * configuration.
1579 */
1580 return bnx2x_config_rss_pf(bp, ind_table,
1581 bp->port.pmf || !CHIP_IS_E1x(bp));
1582}
1583
1584int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1585{
1586 struct bnx2x_config_rss_params params = {0};
1587 int i;
1588
1589	/* Although RSS is meaningless when there is a single HW queue, we
1590 * still need it enabled in order to have HW Rx hash generated.
1591 *
1592 * if (!is_eth_multi(bp))
1593 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1594 */
1595
1596 params.rss_obj = &bp->rss_conf_obj;
1597
1598 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1599
1600 /* RSS mode */
1601 switch (bp->multi_mode) {
1602 case ETH_RSS_MODE_DISABLED:
1603 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1604 break;
1605 case ETH_RSS_MODE_REGULAR:
1606 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1607 break;
1608 case ETH_RSS_MODE_VLAN_PRI:
1609 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1610 break;
1611 case ETH_RSS_MODE_E1HOV_PRI:
1612 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1613 break;
1614 case ETH_RSS_MODE_IP_DSCP:
1615 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1616 break;
1617 default:
1618 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1619 return -EINVAL;
1620 }
1621
1622 /* If RSS is enabled */
1623 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1624 /* RSS configuration */
1625 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1626 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1627 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1628 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1629
1630 /* Hash bits */
1631 params.rss_result_mask = MULTI_MASK;
1632
1633 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1634
1635 if (config_hash) {
1636 /* RSS keys */
1637 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1638 params.rss_key[i] = random32();
1639
1640 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1641 }
1642 }
1643
1644 return bnx2x_config_rss(bp, &params);
1645}
1646
1647static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1648{
1649 struct bnx2x_func_state_params func_params = {0};
1650
1651 /* Prepare parameters for function state transitions */
1652 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1653
1654 func_params.f_obj = &bp->func_obj;
1655 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1656
1657 func_params.params.hw_init.load_phase = load_code;
1658
1659 return bnx2x_func_state_change(bp, &func_params);
1660}
1661
1662/*
1663 * Cleans the objects that have internal lists, without sending
1664 * ramrods. Should be run when interrupts are disabled.
1665 */
1666static void bnx2x_squeeze_objects(struct bnx2x *bp)
1667{
1668 int rc;
1669 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1670 struct bnx2x_mcast_ramrod_params rparam = {0};
1671 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1672
1673 /***************** Cleanup MACs' object first *************************/
1674
1675	/* Wait for completion of the requested commands */
1676 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1677 /* Perform a dry cleanup */
1678 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1679
1680 /* Clean ETH primary MAC */
1681 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1682 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1683 &ramrod_flags);
1684 if (rc != 0)
1685 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1686
1687 /* Cleanup UC list */
1688 vlan_mac_flags = 0;
1689 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1690 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1691 &ramrod_flags);
1692 if (rc != 0)
1693 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1694
1695 /***************** Now clean mcast object *****************************/
1696 rparam.mcast_obj = &bp->mcast_obj;
1697 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1698
1699 /* Add a DEL command... */
1700 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1701 if (rc < 0)
1702 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1703 "object: %d\n", rc);
1704
1705 /* ...and wait until all pending commands are cleared */
1706 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1707 while (rc != 0) {
1708 if (rc < 0) {
1709 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1710 rc);
1711 return;
1712 }
1713
1714 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1715 }
1716}
1717
1718#ifndef BNX2X_STOP_ON_ERROR
1719#define LOAD_ERROR_EXIT(bp, label) \
1720 do { \
1721 (bp)->state = BNX2X_STATE_ERROR; \
1722 goto label; \
1723 } while (0)
1724#else
1725#define LOAD_ERROR_EXIT(bp, label) \
1726 do { \
1727 (bp)->state = BNX2X_STATE_ERROR; \
1728 (bp)->panic = 1; \
1729 return -EBUSY; \
1730 } while (0)
1731#endif
1732
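/*
 * High-level flow of bnx2x_nic_load(): zero the fastpath structures and
 * size the Rx buffers, allocate memory and publish the real queue counts,
 * enable NAPI, negotiate the LOAD_REQ/LOAD_DONE handshake with the MCP
 * (or emulate it via load_count when there is no MCP), init the HW and
 * IRQs, bring up the function and its queues, configure RSS and the
 * primary MAC, and finally start the Tx path and the periodic timer.
 */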
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001733/* must be called with rtnl_lock */
1734int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1735{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001736 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001737 u32 load_code;
1738 int i, rc;
1739
1740#ifdef BNX2X_STOP_ON_ERROR
1741 if (unlikely(bp->panic))
1742 return -EPERM;
1743#endif
1744
1745 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1746
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001747 /* Set the initial link reported state to link down */
1748 bnx2x_acquire_phy_lock(bp);
1749 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1750 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1751 &bp->last_reported_link.link_report_flags);
1752 bnx2x_release_phy_lock(bp);
1753
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001754 /* must be called before memory allocation and HW init */
1755 bnx2x_ilt_set_info(bp);
1756
Ariel Elior6383c0b2011-07-14 08:31:57 +00001757 /*
1758	 * Zero the fastpath structures while preserving the fields that are set
1759	 * up only once: the napi struct, the fp index, max_cos and the bp pointer.
1760 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001761 */
1762 for_each_queue(bp, i)
1763 bnx2x_bz_fp(bp, i);
1764
Ariel Elior6383c0b2011-07-14 08:31:57 +00001765
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001766 /* Set the receive queues buffer size */
1767 bnx2x_set_rx_buf_size(bp);
1768
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001769 if (bnx2x_alloc_mem(bp))
1770 return -ENOMEM;
1771
1772 /* As long as bnx2x_alloc_mem() may possibly update
1773 * bp->num_queues, bnx2x_set_real_num_queues() should always
1774 * come after it.
1775 */
1776 rc = bnx2x_set_real_num_queues(bp);
1777 if (rc) {
1778 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001779 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001780 }
1781
Ariel Elior6383c0b2011-07-14 08:31:57 +00001782	/* Configure multi-CoS mappings in the kernel.
1783	 * This configuration may be overridden by a multi-class queue discipline
1784	 * or by a DCBX negotiation result.
1785 */
1786 bnx2x_setup_tc(bp->dev, bp->max_cos);
1787
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001788 bnx2x_napi_enable(bp);
1789
Ariel Elior889b9af2012-01-26 06:01:51 +00001790 /* set pf load just before approaching the MCP */
1791 bnx2x_set_pf_load(bp);
1792
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001793 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001794 * Returns the type of LOAD command:
1795 * if it is the first port to be initialized
1796 * common blocks should be initialized, otherwise - not
1797 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001798 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00001799 /* init fw_seq */
1800 bp->fw_seq =
1801 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1802 DRV_MSG_SEQ_NUMBER_MASK);
1803 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1804
1805 /* Get current FW pulse sequence */
1806 bp->fw_drv_pulse_wr_seq =
1807 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1808 DRV_PULSE_SEQ_MASK);
1809 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1810
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001811 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001812 if (!load_code) {
1813 BNX2X_ERR("MCP response failure, aborting\n");
1814 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001815 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001816 }
1817 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1818 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001819 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001820 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00001821 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1822 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1823 /* build FW version dword */
1824 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1825 (BCM_5710_FW_MINOR_VERSION << 8) +
1826 (BCM_5710_FW_REVISION_VERSION << 16) +
1827 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1828
1829 /* read loaded FW from chip */
1830 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1831
1832 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
1833 loaded_fw, my_fw);
1834
1835 /* abort nic load if version mismatch */
1836 if (my_fw != loaded_fw) {
1837 BNX2X_ERR("bnx2x with FW %x already loaded, "
1838 "which mismatches my %x FW. aborting",
1839 loaded_fw, my_fw);
1840 rc = -EBUSY;
1841 LOAD_ERROR_EXIT(bp, load_error2);
1842 }
1843 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001844
1845 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001846 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001847
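		/*
		 * No MCP to arbitrate the load: emulate it with per-path load
		 * counters - the first function on the path does the COMMON
		 * init, the first one on a port does the PORT init, and every
		 * other function performs a FUNCTION-only init.
		 */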
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001848 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1849 path, load_count[path][0], load_count[path][1],
1850 load_count[path][2]);
1851 load_count[path][0]++;
1852 load_count[path][1 + port]++;
1853 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1854 path, load_count[path][0], load_count[path][1],
1855 load_count[path][2]);
1856 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001857 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001858 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001859 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1860 else
1861 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1862 }
1863
1864 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001865 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001866 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001867 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001868 /*
1869 * We need the barrier to ensure the ordering between the
1870 * writing to bp->port.pmf here and reading it from the
1871 * bnx2x_periodic_task().
1872 */
1873 smp_mb();
1874 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1875 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001876 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001877
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001878 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1879
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001880 /* Init Function state controlling object */
1881 bnx2x__init_func_obj(bp);
1882
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001883 /* Initialize HW */
1884 rc = bnx2x_init_hw(bp, load_code);
1885 if (rc) {
1886 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001887 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001888 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001889 }
1890
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001891 /* Connect to IRQs */
1892 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001893 if (rc) {
1894 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001895 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001896 }
1897
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001898 /* Setup NIC internals and enable interrupts */
1899 bnx2x_nic_init(bp, load_code);
1900
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001901 /* Init per-function objects */
1902 bnx2x_init_bp_objs(bp);
1903
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001904 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1905 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001906 (bp->common.shmem2_base)) {
1907 if (SHMEM2_HAS(bp, dcc_support))
1908 SHMEM2_WR(bp, dcc_support,
1909 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1910 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1911 }
1912
1913 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1914 rc = bnx2x_func_start(bp);
1915 if (rc) {
1916 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00001917 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001918 LOAD_ERROR_EXIT(bp, load_error3);
1919 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001920
1921 /* Send LOAD_DONE command to MCP */
1922 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001923 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001924 if (!load_code) {
1925 BNX2X_ERR("MCP response failure, aborting\n");
1926 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001927 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001928 }
1929 }
1930
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001931 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001932 if (rc) {
1933 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001934 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001935 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001936
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001937#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001938 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001939 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001940#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001941
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001942 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001943 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001944 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001945 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001946 }
1947
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001948 rc = bnx2x_init_rss_pf(bp);
1949 if (rc)
1950 LOAD_ERROR_EXIT(bp, load_error4);
1951
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001952	/* Now that the clients are configured we are ready to work */
1953 bp->state = BNX2X_STATE_OPEN;
1954
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001955 /* Configure a ucast MAC */
1956 rc = bnx2x_set_eth_mac(bp, true);
1957 if (rc)
1958 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001959
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001960 if (bp->pending_max) {
1961 bnx2x_update_max_mf_config(bp, bp->pending_max);
1962 bp->pending_max = 0;
1963 }
1964
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001965 if (bp->port.pmf)
1966 bnx2x_initial_phy_init(bp, load_mode);
1967
1968 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001969
1970 /* Initialize Rx filter. */
1971 netif_addr_lock_bh(bp->dev);
1972 bnx2x_set_rx_mode(bp->dev);
1973 netif_addr_unlock_bh(bp->dev);
1974
1975 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001976 switch (load_mode) {
1977 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001978		/* Tx queues should only be re-enabled */
1979 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001980 break;
1981
1982 case LOAD_OPEN:
1983 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001984 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001985 break;
1986
1987 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001988 bp->state = BNX2X_STATE_DIAG;
1989 break;
1990
1991 default:
1992 break;
1993 }
1994
Dmitry Kravkov00253a82011-11-13 04:34:25 +00001995 if (bp->port.pmf)
Yuval Mintze695a2d2012-03-12 11:22:06 +00001996 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00001997 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001998 bnx2x__link_status_update(bp);
1999
2000 /* start the timer */
2001 mod_timer(&bp->timer, jiffies + bp->current_interval);
2002
2003#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00002004 /* re-read iscsi info */
2005 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002006 bnx2x_setup_cnic_irq_info(bp);
2007 if (bp->state == BNX2X_STATE_OPEN)
2008 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2009#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002010
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002011 /* mark driver is loaded in shmem2 */
2012 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2013 u32 val;
2014 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2015 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2016 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2017 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2018 }
2019
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002020 /* Wait for all pending SP commands to complete */
2021 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2022 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2023 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2024 return -EBUSY;
2025 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002026
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002027 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002028 return 0;
2029
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002030#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002031load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002032#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002033 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002034 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002035#endif
2036load_error3:
2037 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002038
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002039 /* Clean queueable objects */
2040 bnx2x_squeeze_objects(bp);
2041
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002042 /* Free SKBs, SGEs, TPA pool and driver internals */
2043 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002044 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002045 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002046
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002047 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002048 bnx2x_free_irq(bp);
2049load_error2:
2050 if (!BP_NOMCP(bp)) {
2051 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2052 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2053 }
2054
2055 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002056load_error1:
2057 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002058 /* clear pf_load status, as it was already set */
2059 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002060load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002061 bnx2x_free_mem(bp);
2062
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002063 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002064#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002065}
2066
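/*
 * bnx2x_nic_unload() is roughly the mirror of bnx2x_nic_load(): mark the
 * driver as unloaded in shmem2, stop the Tx path and the timer, clean up
 * the chip (or only quiesce it when unloading for parity recovery), free
 * IRQs, SKBs and memory, and record any pending parity/global reset state
 * so a later recovery flow can pick it up.
 */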
2067/* must be called with rtnl_lock */
2068int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2069{
2070 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002071 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002072
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002073 /* mark driver is unloaded in shmem2 */
2074 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2075 u32 val;
2076 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2077 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2078 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2079 }
2080
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002081 if ((bp->state == BNX2X_STATE_CLOSED) ||
2082 (bp->state == BNX2X_STATE_ERROR)) {
2083 /* We can get here if the driver has been unloaded
2084 * during parity error recovery and is either waiting for a
2085 * leader to complete or for other functions to unload and
2086 * then ifdown has been issued. In this case we want to
2087 * unload and let other functions to complete a recovery
2088		 * unload and let the other functions complete the recovery
2089 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002090 bp->recovery_state = BNX2X_RECOVERY_DONE;
2091 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002092 bnx2x_release_leader_lock(bp);
2093 smp_mb();
2094
2095 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002096
2097 return -EINVAL;
2098 }
2099
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002100 /*
2101 * It's important to set the bp->state to the value different from
2102 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2103 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2104 */
2105 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2106 smp_mb();
2107
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002108 /* Stop Tx */
2109 bnx2x_tx_disable(bp);
2110
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002111#ifdef BCM_CNIC
2112 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2113#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002114
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002115 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002116
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002117 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002118
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002119 /* Set ALWAYS_ALIVE bit in shmem */
2120 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2121
2122 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002123
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002124 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002125 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002126
2127 /* Cleanup the chip if needed */
2128 if (unload_mode != UNLOAD_RECOVERY)
2129 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002130 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002131 /* Send the UNLOAD_REQUEST to the MCP */
2132 bnx2x_send_unload_req(bp, unload_mode);
2133
2134 /*
2135		 * Prevent transactions to the host from the functions on the
2136		 * engine that doesn't reset global blocks in case of a global
2137		 * attention, once the global blocks are reset and the gates are
2138		 * opened (the engine whose leader will perform the recovery
2139		 * last).
2140 */
2141 if (!CHIP_IS_E1x(bp))
2142 bnx2x_pf_disable(bp);
2143
2144 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002145 bnx2x_netif_stop(bp, 1);
2146
2147 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002148 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002149
2150 /* Report UNLOAD_DONE to MCP */
2151 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002152 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002153
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002154 /*
2155	 * At this stage no more interrupts will arrive, so we may safely clean
2156 * the queueable objects here in case they failed to get cleaned so far.
2157 */
2158 bnx2x_squeeze_objects(bp);
2159
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002160 /* There should be no more pending SP commands at this stage */
2161 bp->sp_state = 0;
2162
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002163 bp->port.pmf = 0;
2164
2165 /* Free SKBs, SGEs, TPA pool and driver internals */
2166 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002167 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002168 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002169
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002170 bnx2x_free_mem(bp);
2171
2172 bp->state = BNX2X_STATE_CLOSED;
2173
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002174 /* Check if there are pending parity attentions. If there are - set
2175 * RECOVERY_IN_PROGRESS.
2176 */
2177 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2178 bnx2x_set_reset_in_progress(bp);
2179
2180 /* Set RESET_IS_GLOBAL if needed */
2181 if (global)
2182 bnx2x_set_reset_global(bp);
2183 }
2184
2185
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002186 /* The last driver must disable a "close the gate" if there is no
2187 * parity attention or "process kill" pending.
2188 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002189 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002190 bnx2x_disable_close_the_gate(bp);
2191
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002192 return 0;
2193}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002194
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002195int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2196{
2197 u16 pmcsr;
2198
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002199 /* If there is no power capability, silently succeed */
2200 if (!bp->pm_cap) {
2201 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2202 return 0;
2203 }
2204
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002205 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2206
2207 switch (state) {
2208 case PCI_D0:
2209 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2210 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2211 PCI_PM_CTRL_PME_STATUS));
2212
2213 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2214 /* delay required during transition out of D3hot */
2215 msleep(20);
2216 break;
2217
2218 case PCI_D3hot:
2219		/* If there are other clients above, don't
2220 shut down the power */
2221 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2222 return 0;
2223 /* Don't shut down the power for emulation and FPGA */
2224 if (CHIP_REV_IS_SLOW(bp))
2225 return 0;
2226
2227 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2228 pmcsr |= 3;
2229
2230 if (bp->wol)
2231 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2232
2233 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2234 pmcsr);
2235
2236 /* No more memory access after this point until
2237 * device is brought back to D0.
2238 */
2239 break;
2240
2241 default:
2242 return -EINVAL;
2243 }
2244 return 0;
2245}
2246
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002247/*
2248 * net_device service functions
2249 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002250int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002251{
2252 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002253 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002254 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2255 napi);
2256 struct bnx2x *bp = fp->bp;
2257
2258 while (1) {
2259#ifdef BNX2X_STOP_ON_ERROR
2260 if (unlikely(bp->panic)) {
2261 napi_complete(napi);
2262 return 0;
2263 }
2264#endif
2265
Ariel Elior6383c0b2011-07-14 08:31:57 +00002266 for_each_cos_in_tx_queue(fp, cos)
2267 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2268 bnx2x_tx_int(bp, &fp->txdata[cos]);
2269
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002270
2271 if (bnx2x_has_rx_work(fp)) {
2272 work_done += bnx2x_rx_int(fp, budget - work_done);
2273
2274 /* must not complete if we consumed full budget */
2275 if (work_done >= budget)
2276 break;
2277 }
2278
2279 /* Fall out from the NAPI loop if needed */
2280 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002281#ifdef BCM_CNIC
2282 /* No need to update SB for FCoE L2 ring as long as
2283 * it's connected to the default SB and the SB
2284 * has been updated when NAPI was scheduled.
2285 */
2286 if (IS_FCOE_FP(fp)) {
2287 napi_complete(napi);
2288 break;
2289 }
2290#endif
2291
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002292 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002293 /* bnx2x_has_rx_work() reads the status block,
2294 * thus we need to ensure that status block indices
2295 * have been actually read (bnx2x_update_fpsb_idx)
2296 * prior to this check (bnx2x_has_rx_work) so that
2297 * we won't write the "newer" value of the status block
2298 * to IGU (if there was a DMA right after
2299 * bnx2x_has_rx_work and if there is no rmb, the memory
2300 * reading (bnx2x_update_fpsb_idx) may be postponed
2301 * to right before bnx2x_ack_sb). In this case there
2302 * will never be another interrupt until there is
2303 * another update of the status block, while there
2304 * is still unhandled work.
2305 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002306 rmb();
2307
2308 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2309 napi_complete(napi);
2310 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002311 DP(NETIF_MSG_HW,
2312 "Update index to %d\n", fp->fp_hc_idx);
2313 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2314 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002315 IGU_INT_ENABLE, 1);
2316 break;
2317 }
2318 }
2319 }
2320
2321 return work_done;
2322}
2323
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002324/* we split the first BD into headers and data BDs
2325 * to ease the pain of our fellow microcode engineers.
2326 * We use one mapping for both BDs.
2327 * So far this has only been observed to happen
2328 * in Other Operating Systems(TM)
2329 */
2330static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002331 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002332 struct sw_tx_bd *tx_buf,
2333 struct eth_tx_start_bd **tx_bd, u16 hlen,
2334 u16 bd_prod, int nbd)
2335{
2336 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2337 struct eth_tx_bd *d_tx_bd;
2338 dma_addr_t mapping;
2339 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2340
2341 /* first fix first BD */
2342 h_tx_bd->nbd = cpu_to_le16(nbd);
2343 h_tx_bd->nbytes = cpu_to_le16(hlen);
2344
2345 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2346 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2347 h_tx_bd->addr_lo, h_tx_bd->nbd);
2348
2349 /* now get a new data BD
2350 * (after the pbd) and fill it */
2351 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002352 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002353
2354 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2355 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2356
2357 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2358 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2359 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2360
2361 /* this marks the BD as one that has no individual mapping */
2362 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2363
2364 DP(NETIF_MSG_TX_QUEUED,
2365 "TSO split data size is %d (%x:%x)\n",
2366 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2367
2368 /* update tx_bd */
2369 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2370
2371 return bd_prod;
2372}
2373
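/*
 * Adjust a partial checksum when the HW expects it to start 'fix' bytes
 * away from where the stack computed it: subtract (fix > 0) or add back
 * (fix < 0) the checksum of those bytes, then byte-swap for the BD.
 */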
2374static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2375{
2376 if (fix > 0)
2377 csum = (u16) ~csum_fold(csum_sub(csum,
2378 csum_partial(t_header - fix, fix, 0)));
2379
2380 else if (fix < 0)
2381 csum = (u16) ~csum_fold(csum_add(csum,
2382 csum_partial(t_header, -fix, 0)));
2383
2384 return swab16(csum);
2385}
2386
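/*
 * Classify the skb into XMIT_* flags: plain vs. IPv4/IPv6 checksum
 * offload, TCP checksum, and GSO v4/v6 - used below to decide how the
 * start/parsing BDs must be filled.
 */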
2387static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2388{
2389 u32 rc;
2390
2391 if (skb->ip_summed != CHECKSUM_PARTIAL)
2392 rc = XMIT_PLAIN;
2393
2394 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002395 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002396 rc = XMIT_CSUM_V6;
2397 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2398 rc |= XMIT_CSUM_TCP;
2399
2400 } else {
2401 rc = XMIT_CSUM_V4;
2402 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2403 rc |= XMIT_CSUM_TCP;
2404 }
2405 }
2406
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002407 if (skb_is_gso_v6(skb))
2408 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2409 else if (skb_is_gso(skb))
2410 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002411
2412 return rc;
2413}
2414
2415#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2416/* check if packet requires linearization (packet is too fragmented)
2417 no need to check fragmentation if page size > 8K (there will be no
2418   violation of FW restrictions) */
2419static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2420 u32 xmit_type)
2421{
2422 int to_copy = 0;
2423 int hlen = 0;
2424 int first_bd_sz = 0;
2425
2426 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2427 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2428
2429 if (xmit_type & XMIT_GSO) {
2430 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2431 /* Check if LSO packet needs to be copied:
2432 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2433 int wnd_size = MAX_FETCH_BD - 3;
2434 /* Number of windows to check */
2435 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2436 int wnd_idx = 0;
2437 int frag_idx = 0;
2438 u32 wnd_sum = 0;
2439
2440 /* Headers length */
2441 hlen = (int)(skb_transport_header(skb) - skb->data) +
2442 tcp_hdrlen(skb);
2443
2444 /* Amount of data (w/o headers) on linear part of SKB*/
2445 first_bd_sz = skb_headlen(skb) - hlen;
2446
2447 wnd_sum = first_bd_sz;
2448
2449 /* Calculate the first sum - it's special */
2450 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2451 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002452 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002453
2454 /* If there was data on linear skb data - check it */
2455 if (first_bd_sz > 0) {
2456 if (unlikely(wnd_sum < lso_mss)) {
2457 to_copy = 1;
2458 goto exit_lbl;
2459 }
2460
2461 wnd_sum -= first_bd_sz;
2462 }
2463
2464 /* Others are easier: run through the frag list and
2465 check all windows */
2466 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2467 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002468 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002469
2470 if (unlikely(wnd_sum < lso_mss)) {
2471 to_copy = 1;
2472 break;
2473 }
2474 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002475 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002476 }
2477 } else {
2478			/* in the non-LSO case a too fragmented packet should always
2479 be linearized */
2480 to_copy = 1;
2481 }
2482 }
2483
2484exit_lbl:
2485 if (unlikely(to_copy))
2486 DP(NETIF_MSG_TX_QUEUED,
2487 "Linearization IS REQUIRED for %s packet. "
2488 "num_frags %d hlen %d first_bd_sz %d\n",
2489 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2490 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2491
2492 return to_copy;
2493}
2494#endif
2495
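/* E2 and newer chips: encode the GSO MSS into the parsing data and flag
 * IPv6 packets that carry extension headers.
 */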
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002496static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2497 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002498{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002499 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2500 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2501 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002502 if ((xmit_type & XMIT_GSO_V6) &&
2503 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002504 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002505}
2506
2507/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002508 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002509 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002510 * @skb: packet skb
2511 * @pbd: parse BD
2512 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002513 */
2514static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2515 struct eth_tx_parse_bd_e1x *pbd,
2516 u32 xmit_type)
2517{
2518 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2519 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2520 pbd->tcp_flags = pbd_tcp_flags(skb);
2521
2522 if (xmit_type & XMIT_GSO_V4) {
2523 pbd->ip_id = swab16(ip_hdr(skb)->id);
2524 pbd->tcp_pseudo_csum =
2525 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2526 ip_hdr(skb)->daddr,
2527 0, IPPROTO_TCP, 0));
2528
2529 } else
2530 pbd->tcp_pseudo_csum =
2531 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2532 &ipv6_hdr(skb)->daddr,
2533 0, IPPROTO_TCP, 0));
2534
2535 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2536}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002537
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002538/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002539 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002540 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002541 * @bp: driver handle
2542 * @skb: packet skb
2543 * @parsing_data: data to be updated
2544 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002545 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002546 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002547 */
2548static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002549 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002550{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002551 *parsing_data |=
2552 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2553 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2554 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002555
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002556 if (xmit_type & XMIT_CSUM_TCP) {
2557 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2558 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2559 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002560
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002561 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2562 } else
2563 /* We support checksum offload for TCP and UDP only.
2564 * No need to pass the UDP header length - it's a constant.
2565 */
2566 return skb_transport_header(skb) +
2567 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002568}
2569
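/* Set the checksum-offload flags in the start BD: L4 checksum always,
 * IPv4 header checksum or the IPv6 indication, and the UDP flag when the
 * L4 protocol is not TCP.
 */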
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002570static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2571 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2572{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002573 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2574
2575 if (xmit_type & XMIT_CSUM_V4)
2576 tx_start_bd->bd_flags.as_bitfield |=
2577 ETH_TX_BD_FLAGS_IP_CSUM;
2578 else
2579 tx_start_bd->bd_flags.as_bitfield |=
2580 ETH_TX_BD_FLAGS_IPV6;
2581
2582 if (!(xmit_type & XMIT_CSUM_TCP))
2583 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002584}
2585
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002586/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002587 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002588 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002589 * @bp: driver handle
2590 * @skb: packet skb
2591 * @pbd: parse BD to be updated
2592 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002593 */
2594static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2595 struct eth_tx_parse_bd_e1x *pbd,
2596 u32 xmit_type)
2597{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002598 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002599
2600 /* for now NS flag is not used in Linux */
2601 pbd->global_data =
2602 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2603 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2604
2605 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002606 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002607
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002608 hlen += pbd->ip_hlen_w;
2609
2610 /* We support checksum offload for TCP and UDP only */
2611 if (xmit_type & XMIT_CSUM_TCP)
2612 hlen += tcp_hdrlen(skb) / 2;
2613 else
2614 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002615
2616 pbd->total_hlen_w = cpu_to_le16(hlen);
2617 hlen = hlen*2;
2618
2619 if (xmit_type & XMIT_CSUM_TCP) {
2620 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2621
2622 } else {
2623 s8 fix = SKB_CS_OFF(skb); /* signed! */
2624
2625 DP(NETIF_MSG_TX_QUEUED,
2626 "hlen %d fix %d csum before fix %x\n",
2627 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2628
2629 /* HW bug: fixup the CSUM */
2630 pbd->tcp_pseudo_csum =
2631 bnx2x_csum_fix(skb_transport_header(skb),
2632 SKB_CS(skb), fix);
2633
2634 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2635 pbd->tcp_pseudo_csum);
2636 }
2637
2638 return hlen;
2639}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002640
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002641/* called with netif_tx_lock
2642 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2643 * netif_wake_queue()
2644 */
2645netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2646{
2647 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002648
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002649 struct bnx2x_fastpath *fp;
2650 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002651 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002652 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002653 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002654 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002655 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002656 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002657 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002658 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002659 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002660 dma_addr_t mapping;
2661 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2662 int i;
2663 u8 hlen = 0;
2664 __le16 pkt_size = 0;
2665 struct ethhdr *eth;
2666 u8 mac_type = UNICAST_ADDRESS;
2667
2668#ifdef BNX2X_STOP_ON_ERROR
2669 if (unlikely(bp->panic))
2670 return NETDEV_TX_BUSY;
2671#endif
2672
Ariel Elior6383c0b2011-07-14 08:31:57 +00002673 txq_index = skb_get_queue_mapping(skb);
2674 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002675
Ariel Elior6383c0b2011-07-14 08:31:57 +00002676 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2677
2678 /* decode the fastpath index and the cos index from the txq */
2679 fp_index = TXQ_TO_FP(txq_index);
2680 txdata_index = TXQ_TO_COS(txq_index);
2681
2682#ifdef BCM_CNIC
2683 /*
2684 * Override the above for the FCoE queue:
2685 * - FCoE fp entry is right after the ETH entries.
2686 * - FCoE L2 queue uses bp->txdata[0] only.
2687 */
2688 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2689 bnx2x_fcoe_tx(bp, txq_index)))) {
2690 fp_index = FCOE_IDX;
2691 txdata_index = 0;
2692 }
2693#endif
2694
2695 /* enable this debug print to view the transmission queue being used
Joe Perches94f05b02011-08-14 12:16:20 +00002696 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002697 txq_index, fp_index, txdata_index); */
2698
2699 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002700 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002701 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002702
Ariel Elior6383c0b2011-07-14 08:31:57 +00002703	/* enable this debug print to view the transmission details
2704 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
Joe Perches94f05b02011-08-14 12:16:20 +00002705 " tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002706 txdata->cid, fp_index, txdata_index, txdata, fp); */
2707
2708 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2709 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002710 fp->eth_q_stats.driver_xoff++;
2711 netif_tx_stop_queue(txq);
2712 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2713 return NETDEV_TX_BUSY;
2714 }
2715
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002716 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2717 "protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002718 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002719 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2720
2721 eth = (struct ethhdr *)skb->data;
2722
2723 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2724 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2725 if (is_broadcast_ether_addr(eth->h_dest))
2726 mac_type = BROADCAST_ADDRESS;
2727 else
2728 mac_type = MULTICAST_ADDRESS;
2729 }
2730
2731#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2732 /* First, check if we need to linearize the skb (due to FW
2733 restrictions). No need to check fragmentation if page size > 8K
2734 (there will be no violation to FW restrictions) */
2735 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2736 /* Statistics of linearization */
2737 bp->lin_cnt++;
2738 if (skb_linearize(skb) != 0) {
2739 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2740 "silently dropping this SKB\n");
2741 dev_kfree_skb_any(skb);
2742 return NETDEV_TX_OK;
2743 }
2744 }
2745#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002746 /* Map skb linear data for DMA */
2747 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2748 skb_headlen(skb), DMA_TO_DEVICE);
2749 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2750 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2751 "silently dropping this SKB\n");
2752 dev_kfree_skb_any(skb);
2753 return NETDEV_TX_OK;
2754 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002755 /*
2756 Please read carefully. First we use one BD which we mark as start,
2757 then we have a parsing info BD (used for TSO or xsum),
2758 and only then we have the rest of the TSO BDs.
2759 (don't forget to mark the last one as last,
2760 and to unmap only AFTER you write to the BD ...)
2761	   And above all, all PBD sizes are in words - NOT DWORDS!
2762 */
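	/* A sketch of the resulting BD chain for a typical packet (not
	 * exhaustive):
	 *
	 *   start_bd -> parsing BD (e1x or e2) -> [split header BD] -> data BDs
	 *
	 * first_bd keeps pointing at start_bd even if tx_start_bd is later
	 * advanced by the LSO split, and nbd counts every BD in the chain.
	 */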
2763
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002764	/* get the current pkt producer now - advance it only just before sending,
2765	 * since mapping of pages may fail and cause the packet to be dropped
2766 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002767 pkt_prod = txdata->tx_pkt_prod;
2768 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002769
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002770 /* get a tx_buf and first BD
2771 * tx_start_bd may be changed during SPLIT,
2772 * but first_bd will always stay first
2773 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002774 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2775 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002776 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002777
2778 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002779 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2780 mac_type);
2781
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002782 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002783 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002784
2785 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002786 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002787 tx_buf->skb = skb;
2788 tx_buf->flags = 0;
2789
2790 DP(NETIF_MSG_TX_QUEUED,
2791 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002792 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002793
Jesse Grosseab6d182010-10-20 13:56:03 +00002794 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002795 tx_start_bd->vlan_or_ethertype =
2796 cpu_to_le16(vlan_tx_tag_get(skb));
2797 tx_start_bd->bd_flags.as_bitfield |=
2798 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002799 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002800 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002801
2802 /* turn on parsing and get a BD */
2803 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002804
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002805 if (xmit_type & XMIT_CSUM)
2806 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002807
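	/* Non-E1x chips use the eth_tx_parse_bd_e2 layout; in switch-independent
	 * MF mode they also need the source/destination MACs in the PBD for
	 * local switching. Older (E1x) chips use eth_tx_parse_bd_e1x instead.
	 */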
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002808 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002809 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002810 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2811 /* Set PBD in checksum offload case */
2812 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002813 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2814 &pbd_e2_parsing_data,
2815 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002816 if (IS_MF_SI(bp)) {
2817 /*
2818 * fill in the MAC addresses in the PBD - for local
2819 * switching
2820 */
2821 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2822 &pbd_e2->src_mac_addr_mid,
2823 &pbd_e2->src_mac_addr_lo,
2824 eth->h_source);
2825 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2826 &pbd_e2->dst_mac_addr_mid,
2827 &pbd_e2->dst_mac_addr_lo,
2828 eth->h_dest);
2829 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002830 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002831 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002832 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2833 /* Set PBD in checksum offload case */
2834 if (xmit_type & XMIT_CSUM)
2835 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002836
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002837 }
2838
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002839 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002840 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2841 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002842 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002843 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2844 pkt_size = tx_start_bd->nbytes;
2845
2846 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2847 " nbytes %d flags %x vlan %x\n",
2848 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2849 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002850 tx_start_bd->bd_flags.as_bitfield,
2851 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002852
2853 if (xmit_type & XMIT_GSO) {
2854
2855 DP(NETIF_MSG_TX_QUEUED,
2856 "TSO packet len %d hlen %d total len %d tso size %d\n",
2857 skb->len, hlen, skb_headlen(skb),
2858 skb_shinfo(skb)->gso_size);
2859
2860 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2861
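		/* If the linear data is longer than the parsed headers, split
		 * the first BD at hlen so that it carries only the headers;
		 * the remainder gets its own BD and nbd is bumped
		 * (see bnx2x_tx_split()).
		 */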
2862 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00002863 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2864 &tx_start_bd, hlen,
2865 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002866 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002867 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2868 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002869 else
2870 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002871 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002872
2873 /* Set the PBD's parsing_data field if not zero
2874 * (for the chips newer than 57711).
2875 */
2876 if (pbd_e2_parsing_data)
2877 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2878
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002879 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2880
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002881 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002882 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2883 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2884
Eric Dumazet9e903e02011-10-18 21:00:24 +00002885 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2886 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002887 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00002888 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002889
2890 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2891 "dropping packet...\n");
2892
2893			/* we need to unmap all buffers already mapped
2894			 * for this SKB;
2895			 * first_bd->nbd needs to be properly updated
2896			 * before the call to bnx2x_free_tx_pkt
2897 */
2898 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002899 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00002900 TX_BD(txdata->tx_pkt_prod),
2901 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002902 return NETDEV_TX_OK;
2903 }
2904
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002905 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002906 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002907 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00002908 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002909
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002910 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2911 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00002912 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2913 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002914 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002915
2916 DP(NETIF_MSG_TX_QUEUED,
2917 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2918 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2919 le16_to_cpu(tx_data_bd->nbytes));
2920 }
2921
2922 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2923
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002924 /* update with actual num BDs */
2925 first_bd->nbd = cpu_to_le16(nbd);
2926
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002927 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2928
2929	/* now send a tx doorbell, counting the next-page BD
2930	 * if the packet's BD chain contains or ends with it
2931 */
2932 if (TX_BD_POFF(bd_prod) < nbd)
2933 nbd++;
2934
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002935	/* total_pkt_bytes should be set on the first data BD if
2936	 * it's not an LSO packet and there is more than one
2937	 * data BD. In this case pkt_size is limited by the MTU value.
2938	 * However we prefer to set it for an LSO packet as well (while we
2939	 * don't have to) in order to save some CPU cycles in the non-LSO
2940	 * case, where we care much more about them.
2941 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002942 if (total_pkt_bd != NULL)
2943 total_pkt_bd->total_pkt_bytes = pkt_size;
2944
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002945 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002946 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002947 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002948 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002949 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2950 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2951 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2952 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002953 if (pbd_e2)
2954 DP(NETIF_MSG_TX_QUEUED,
2955 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2956 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2957 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2958 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2959 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002960 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2961
Tom Herbert2df1a702011-11-28 16:33:37 +00002962 netdev_tx_sent_queue(txq, skb->len);
2963
Ariel Elior6383c0b2011-07-14 08:31:57 +00002964 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002965 /*
2966 * Make sure that the BD data is updated before updating the producer
2967 * since FW might read the BD right after the producer is updated.
2968 * This is only applicable for weak-ordered memory model archs such
2969	 * as IA-64. The following barrier is also mandatory since the FW
2970	 * assumes that packets always have BDs.
2971 */
2972 wmb();
2973
Ariel Elior6383c0b2011-07-14 08:31:57 +00002974 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002975 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002976
Ariel Elior6383c0b2011-07-14 08:31:57 +00002977 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002978
2979 mmiowb();
2980
Ariel Elior6383c0b2011-07-14 08:31:57 +00002981 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002982
Ariel Elior6383c0b2011-07-14 08:31:57 +00002983 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002984 netif_tx_stop_queue(txq);
2985
2986 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2987 * ordering of set_bit() in netif_tx_stop_queue() and read of
2988 * fp->bd_tx_cons */
2989 smp_mb();
2990
2991 fp->eth_q_stats.driver_xoff++;
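		/* Re-check availability: a concurrent bnx2x_tx_int() may have
		 * freed BDs after the test above; if so, wake the queue back
		 * up so it is not left stopped.
		 */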
Ariel Elior6383c0b2011-07-14 08:31:57 +00002992 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002993 netif_tx_wake_queue(txq);
2994 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002995 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002996
2997 return NETDEV_TX_OK;
2998}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002999
Ariel Elior6383c0b2011-07-14 08:31:57 +00003000/**
3001 * bnx2x_setup_tc - routine to configure net_device for multi tc
3002 *
3003 * @dev:	net device to configure
3004 * @num_tc:	number of traffic classes to enable
3005 *
3006 * callback connected to the ndo_setup_tc function pointer
3007 */
3008int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3009{
3010 int cos, prio, count, offset;
3011 struct bnx2x *bp = netdev_priv(dev);
3012
3013 /* setup tc must be called under rtnl lock */
3014 ASSERT_RTNL();
3015
3016 /* no traffic classes requested. aborting */
3017 if (!num_tc) {
3018 netdev_reset_tc(dev);
3019 return 0;
3020 }
3021
3022 /* requested to support too many traffic classes */
3023 if (num_tc > bp->max_cos) {
3024 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
Joe Perches94f05b02011-08-14 12:16:20 +00003025 " requested: %d. max supported is %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003026 num_tc, bp->max_cos);
3027 return -EINVAL;
3028 }
3029
3030 /* declare amount of supported traffic classes */
3031 if (netdev_set_num_tc(dev, num_tc)) {
Joe Perches94f05b02011-08-14 12:16:20 +00003032 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003033 num_tc);
3034 return -EINVAL;
3035 }
3036
3037 /* configure priority to traffic class mapping */
3038 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3039 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Joe Perches94f05b02011-08-14 12:16:20 +00003040 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003041 prio, bp->prio_to_cos[prio]);
3042 }
3043
3044
3045 /* Use this configuration to diffrentiate tc0 from other COSes
3046 This can be used for ets or pfc, and save the effort of setting
3047 up a multio class queue disc or negotiating DCBX with a switch
3048 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003049 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003050 for (prio = 1; prio < 16; prio++) {
3051 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003052 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003053 } */
3054
3055 /* configure traffic class to transmission queue mapping */
3056 for (cos = 0; cos < bp->max_cos; cos++) {
3057 count = BNX2X_NUM_ETH_QUEUES(bp);
3058 offset = cos * MAX_TXQS_PER_COS;
3059 netdev_set_tc_queue(dev, cos, count, offset);
Joe Perches94f05b02011-08-14 12:16:20 +00003060 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003061 cos, offset, count);
3062 }
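	/* For illustration: with 4 ETH queues and MAX_TXQS_PER_COS == 16
	 * (hypothetical numbers), tc0 would own txqs 0-3, tc1 txqs 16-19 and
	 * tc2 txqs 32-35.
	 */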
3063
3064 return 0;
3065}
3066
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003067/* called with rtnl_lock */
3068int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3069{
3070 struct sockaddr *addr = p;
3071 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003072 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003073
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003074 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003075 return -EINVAL;
3076
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003077#ifdef BCM_CNIC
3078 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
3079 return -EINVAL;
3080#endif
3081
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003082 if (netif_running(dev)) {
3083 rc = bnx2x_set_eth_mac(bp, false);
3084 if (rc)
3085 return rc;
3086 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003087
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003088 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003089 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3090
3091 if (netif_running(dev))
3092 rc = bnx2x_set_eth_mac(bp, true);
3093
3094 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003095}
3096
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003097static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3098{
3099 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3100 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003101 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003102
3103 /* Common */
3104#ifdef BCM_CNIC
3105 if (IS_FCOE_IDX(fp_index)) {
3106 memset(sb, 0, sizeof(union host_hc_status_block));
3107 fp->status_blk_mapping = 0;
3108
3109 } else {
3110#endif
3111 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003112 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003113 BNX2X_PCI_FREE(sb->e2_sb,
3114 bnx2x_fp(bp, fp_index,
3115 status_blk_mapping),
3116 sizeof(struct host_hc_status_block_e2));
3117 else
3118 BNX2X_PCI_FREE(sb->e1x_sb,
3119 bnx2x_fp(bp, fp_index,
3120 status_blk_mapping),
3121 sizeof(struct host_hc_status_block_e1x));
3122#ifdef BCM_CNIC
3123 }
3124#endif
3125 /* Rx */
3126 if (!skip_rx_queue(bp, fp_index)) {
3127 bnx2x_free_rx_bds(fp);
3128
3129 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3130 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3131 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3132 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3133 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3134
3135 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3136 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3137 sizeof(struct eth_fast_path_rx_cqe) *
3138 NUM_RCQ_BD);
3139
3140 /* SGE ring */
3141 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3142 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3143 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3144 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3145 }
3146
3147 /* Tx */
3148 if (!skip_tx_queue(bp, fp_index)) {
3149 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003150 for_each_cos_in_tx_queue(fp, cos) {
3151 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3152
3153 DP(BNX2X_MSG_SP,
Joe Perches94f05b02011-08-14 12:16:20 +00003154 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003155 fp_index, cos, txdata->cid);
3156
3157 BNX2X_FREE(txdata->tx_buf_ring);
3158 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3159 txdata->tx_desc_mapping,
3160 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3161 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003162 }
3163 /* end of fastpath */
3164}
3165
3166void bnx2x_free_fp_mem(struct bnx2x *bp)
3167{
3168 int i;
3169 for_each_queue(bp, i)
3170 bnx2x_free_fp_mem_at(bp, i);
3171}
3172
3173static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3174{
3175 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003176 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003177 bnx2x_fp(bp, index, sb_index_values) =
3178 (__le16 *)status_blk.e2_sb->sb.index_values;
3179 bnx2x_fp(bp, index, sb_running_index) =
3180 (__le16 *)status_blk.e2_sb->sb.running_index;
3181 } else {
3182 bnx2x_fp(bp, index, sb_index_values) =
3183 (__le16 *)status_blk.e1x_sb->sb.index_values;
3184 bnx2x_fp(bp, index, sb_running_index) =
3185 (__le16 *)status_blk.e1x_sb->sb.running_index;
3186 }
3187}
3188
3189static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3190{
3191 union host_hc_status_block *sb;
3192 struct bnx2x_fastpath *fp = &bp->fp[index];
3193 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003194 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003195 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003196
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003197#ifdef BCM_CNIC
Dmitry Kravkov1fdf1552012-01-23 07:31:54 +00003198 if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003199 rx_ring_size = MIN_RX_SIZE_NONTPA;
3200 bp->rx_ring_size = rx_ring_size;
3201 } else
3202#endif
David S. Miller8decf862011-09-22 03:23:13 -04003203 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003204 u32 cfg = SHMEM_RD(bp,
3205 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003206
David S. Miller8decf862011-09-22 03:23:13 -04003207 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3208
Mintz Yuvald760fc32012-02-15 02:10:28 +00003209		/* Decrease ring size for 1G functions */
3210 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3211 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3212 rx_ring_size /= 10;
3213
David S. Miller8decf862011-09-22 03:23:13 -04003214 /* allocate at least number of buffers required by FW */
3215 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3216 MIN_RX_SIZE_TPA, rx_ring_size);
3217
3218 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003219 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003220 rx_ring_size = bp->rx_ring_size;
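	/* At this point rx_ring_size is either the value explicitly requested
	 * via bp->rx_ring_size or a per-RX-queue share of MAX_RX_AVAIL, clamped
	 * to the FW minimum (MIN_RX_SIZE_TPA/NONTPA) and scaled down for 1G
	 * SerDes functions above.
	 */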
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003221
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003222 /* Common */
3223 sb = &bnx2x_fp(bp, index, status_blk);
3224#ifdef BCM_CNIC
3225 if (!IS_FCOE_IDX(index)) {
3226#endif
3227 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003228 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003229 BNX2X_PCI_ALLOC(sb->e2_sb,
3230 &bnx2x_fp(bp, index, status_blk_mapping),
3231 sizeof(struct host_hc_status_block_e2));
3232 else
3233 BNX2X_PCI_ALLOC(sb->e1x_sb,
3234 &bnx2x_fp(bp, index, status_blk_mapping),
3235 sizeof(struct host_hc_status_block_e1x));
3236#ifdef BCM_CNIC
3237 }
3238#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003239
3240 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3241 * set shortcuts for it.
3242 */
3243 if (!IS_FCOE_IDX(index))
3244 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003245
3246 /* Tx */
3247 if (!skip_tx_queue(bp, index)) {
3248 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003249 for_each_cos_in_tx_queue(fp, cos) {
3250 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3251
3252 DP(BNX2X_MSG_SP, "allocating tx memory of "
Joe Perches94f05b02011-08-14 12:16:20 +00003253 "fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003254 index, cos);
3255
3256 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003257 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003258 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3259 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003260 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003261 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003262 }
3263
3264 /* Rx */
3265 if (!skip_rx_queue(bp, index)) {
3266 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3267 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3268 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3269 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3270 &bnx2x_fp(bp, index, rx_desc_mapping),
3271 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3272
3273 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3274 &bnx2x_fp(bp, index, rx_comp_mapping),
3275 sizeof(struct eth_fast_path_rx_cqe) *
3276 NUM_RCQ_BD);
3277
3278 /* SGE ring */
3279 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3280 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3281 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3282 &bnx2x_fp(bp, index, rx_sge_mapping),
3283 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3284 /* RX BD ring */
3285 bnx2x_set_next_page_rx_bd(fp);
3286
3287 /* CQ ring */
3288 bnx2x_set_next_page_rx_cq(fp);
3289
3290 /* BDs */
3291 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3292 if (ring_size < rx_ring_size)
3293 goto alloc_mem_err;
3294 }
3295
3296 return 0;
3297
3298/* handles low memory cases */
3299alloc_mem_err:
3300 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3301 index, ring_size);
3302	/* FW will drop all packets if the queue is not big enough;
3303	 * in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003304 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003305 */
3306 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003307 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003308 /* release memory allocated for this queue */
3309 bnx2x_free_fp_mem_at(bp, index);
3310 return -ENOMEM;
3311 }
3312 return 0;
3313}
3314
3315int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3316{
3317 int i;
3318
3319 /**
3320 * 1. Allocate FP for leading - fatal if error
3321 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003322 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3323 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003324 */
3325
3326 /* leading */
3327 if (bnx2x_alloc_fp_mem_at(bp, 0))
3328 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003329
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003330#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003331 if (!NO_FCOE(bp))
3332 /* FCoE */
3333 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3334 /* we will fail load process instead of mark
3335 * NO_FCOE_FLAG
3336 */
3337 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003338#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003339
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003340 /* RSS */
3341 for_each_nondefault_eth_queue(bp, i)
3342 if (bnx2x_alloc_fp_mem_at(bp, i))
3343 break;
3344
3345 /* handle memory failures */
3346 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3347 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3348
3349 WARN_ON(delta < 0);
3350#ifdef BCM_CNIC
3351 /**
3352		 * move non-eth FPs next to the last eth FP;
3353		 * this must be done in the following order:
3354 * FCOE_IDX < FWD_IDX < OOO_IDX
3355 */
3356
Ariel Elior6383c0b2011-07-14 08:31:57 +00003357		/* move the FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003358 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3359#endif
3360 bp->num_queues -= delta;
3361 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3362 bp->num_queues + delta, bp->num_queues);
3363 }
3364
3365 return 0;
3366}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003367
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003368void bnx2x_free_mem_bp(struct bnx2x *bp)
3369{
3370 kfree(bp->fp);
3371 kfree(bp->msix_table);
3372 kfree(bp->ilt);
3373}
3374
3375int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3376{
3377 struct bnx2x_fastpath *fp;
3378 struct msix_entry *tbl;
3379 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003380 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003381
Ariel Elior6383c0b2011-07-14 08:31:57 +00003382 /*
3383	 * The biggest MSI-X table we might need is the maximum number of fast
3384	 * path IGU SBs plus the default SB (for the PF).
3385 */
3386 msix_table_size = bp->igu_sb_cnt + 1;
3387
3388 /* fp array: RSS plus CNIC related L2 queues */
Thomas Meyer01e23742011-11-29 11:08:00 +00003389 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003390 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003391 if (!fp)
3392 goto alloc_err;
3393 bp->fp = fp;
3394
3395 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003396 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003397 if (!tbl)
3398 goto alloc_err;
3399 bp->msix_table = tbl;
3400
3401 /* ilt */
3402 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3403 if (!ilt)
3404 goto alloc_err;
3405 bp->ilt = ilt;
3406
3407 return 0;
3408alloc_err:
3409 bnx2x_free_mem_bp(bp);
3410 return -ENOMEM;
3411
3412}
3413
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003414int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003415{
3416 struct bnx2x *bp = netdev_priv(dev);
3417
3418 if (unlikely(!netif_running(dev)))
3419 return 0;
3420
3421 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3422 return bnx2x_nic_load(bp, LOAD_NORMAL);
3423}
3424
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003425int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3426{
3427 u32 sel_phy_idx = 0;
3428 if (bp->link_params.num_phys <= 1)
3429 return INT_PHY;
3430
3431 if (bp->link_vars.link_up) {
3432 sel_phy_idx = EXT_PHY1;
3433 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3434 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3435 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3436 sel_phy_idx = EXT_PHY2;
3437 } else {
3438
3439 switch (bnx2x_phy_selection(&bp->link_params)) {
3440 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3441 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3442 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3443 sel_phy_idx = EXT_PHY1;
3444 break;
3445 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3446 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3447 sel_phy_idx = EXT_PHY2;
3448 break;
3449 }
3450 }
3451
3452 return sel_phy_idx;
3453
3454}
3455int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3456{
3457 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3458 /*
3459	 * The selected active PHY index always refers to the state after swapping
3460	 * (in case PHY swapping is enabled). So when swapping is enabled, we need
3461	 * to reverse the configuration.
3462 */
3463
3464 if (bp->link_params.multi_phy_config &
3465 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3466 if (sel_phy_idx == EXT_PHY1)
3467 sel_phy_idx = EXT_PHY2;
3468 else if (sel_phy_idx == EXT_PHY2)
3469 sel_phy_idx = EXT_PHY1;
3470 }
3471 return LINK_CONFIG_IDX(sel_phy_idx);
3472}
3473
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003474#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3475int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3476{
3477 struct bnx2x *bp = netdev_priv(dev);
3478 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3479
3480 switch (type) {
3481 case NETDEV_FCOE_WWNN:
3482 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3483 cp->fcoe_wwn_node_name_lo);
3484 break;
3485 case NETDEV_FCOE_WWPN:
3486 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3487 cp->fcoe_wwn_port_name_lo);
3488 break;
3489 default:
3490 return -EINVAL;
3491 }
3492
3493 return 0;
3494}
3495#endif
3496
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003497/* called with rtnl_lock */
3498int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3499{
3500 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003501
3502 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00003503 netdev_err(dev, "Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003504 return -EAGAIN;
3505 }
3506
3507 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3508 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3509 return -EINVAL;
3510
3511 /* This does not race with packet allocation
3512 * because the actual alloc size is
3513 * only updated as part of load
3514 */
3515 dev->mtu = new_mtu;
3516
Dmitry Kravkovfe603b42012-02-20 09:59:11 +00003517 bp->gro_check = bnx2x_need_gro_check(new_mtu);
3518
Michał Mirosław66371c42011-04-12 09:38:23 +00003519 return bnx2x_reload_if_running(dev);
3520}
3521
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003522netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003523 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003524{
3525 struct bnx2x *bp = netdev_priv(dev);
3526
3527 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003528 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003529 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003530 features &= ~NETIF_F_GRO;
3531 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003532
3533 return features;
3534}
3535
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003536int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003537{
3538 struct bnx2x *bp = netdev_priv(dev);
3539 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003540 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003541
3542 if (features & NETIF_F_LRO)
3543 flags |= TPA_ENABLE_FLAG;
3544 else
3545 flags &= ~TPA_ENABLE_FLAG;
3546
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003547 if (features & NETIF_F_GRO)
3548 flags |= GRO_ENABLE_FLAG;
3549 else
3550 flags &= ~GRO_ENABLE_FLAG;
3551
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003552 if (features & NETIF_F_LOOPBACK) {
3553 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3554 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3555 bnx2x_reload = true;
3556 }
3557 } else {
3558 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3559 bp->link_params.loopback_mode = LOOPBACK_NONE;
3560 bnx2x_reload = true;
3561 }
3562 }
3563
Michał Mirosław66371c42011-04-12 09:38:23 +00003564 if (flags ^ bp->flags) {
3565 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003566 bnx2x_reload = true;
3567 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003568
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003569 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003570 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3571 return bnx2x_reload_if_running(dev);
3572 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003573 }
3574
Michał Mirosław66371c42011-04-12 09:38:23 +00003575 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003576}
3577
3578void bnx2x_tx_timeout(struct net_device *dev)
3579{
3580 struct bnx2x *bp = netdev_priv(dev);
3581
3582#ifdef BNX2X_STOP_ON_ERROR
3583 if (!bp->panic)
3584 bnx2x_panic();
3585#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003586
3587 smp_mb__before_clear_bit();
3588 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3589 smp_mb__after_clear_bit();
3590
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003591 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003592 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003593}
3594
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003595int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3596{
3597 struct net_device *dev = pci_get_drvdata(pdev);
3598 struct bnx2x *bp;
3599
3600 if (!dev) {
3601 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3602 return -ENODEV;
3603 }
3604 bp = netdev_priv(dev);
3605
3606 rtnl_lock();
3607
3608 pci_save_state(pdev);
3609
3610 if (!netif_running(dev)) {
3611 rtnl_unlock();
3612 return 0;
3613 }
3614
3615 netif_device_detach(dev);
3616
3617 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3618
3619 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3620
3621 rtnl_unlock();
3622
3623 return 0;
3624}
3625
3626int bnx2x_resume(struct pci_dev *pdev)
3627{
3628 struct net_device *dev = pci_get_drvdata(pdev);
3629 struct bnx2x *bp;
3630 int rc;
3631
3632 if (!dev) {
3633 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3634 return -ENODEV;
3635 }
3636 bp = netdev_priv(dev);
3637
3638 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00003639 netdev_err(dev, "Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003640 return -EAGAIN;
3641 }
3642
3643 rtnl_lock();
3644
3645 pci_restore_state(pdev);
3646
3647 if (!netif_running(dev)) {
3648 rtnl_unlock();
3649 return 0;
3650 }
3651
3652 bnx2x_set_power_state(bp, PCI_D0);
3653 netif_device_attach(dev);
3654
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003655 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3656
3657 rtnl_unlock();
3658
3659 return rc;
3660}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003661
3662
3663void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3664 u32 cid)
3665{
3666 /* ustorm cxt validation */
3667 cxt->ustorm_ag_context.cdu_usage =
3668 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3669 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3670 /* xcontext validation */
3671 cxt->xstorm_ag_context.cdu_reserved =
3672 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3673 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3674}
3675
3676static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3677 u8 fw_sb_id, u8 sb_index,
3678 u8 ticks)
3679{
3680
3681 u32 addr = BAR_CSTRORM_INTMEM +
3682 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3683 REG_WR8(bp, addr, ticks);
3684 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3685 port, fw_sb_id, sb_index, ticks);
3686}
3687
3688static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3689 u16 fw_sb_id, u8 sb_index,
3690 u8 disable)
3691{
3692 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3693 u32 addr = BAR_CSTRORM_INTMEM +
3694 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3695 u16 flags = REG_RD16(bp, addr);
3696 /* clear and set */
3697 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3698 flags |= enable_flag;
3699 REG_WR16(bp, addr, flags);
3700 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3701 port, fw_sb_id, sb_index, disable);
3702}
3703
3704void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3705 u8 sb_index, u8 disable, u16 usec)
3706{
3707 int port = BP_PORT(bp);
3708 u8 ticks = usec / BNX2X_BTR;
3709
3710 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3711
3712 disable = disable ? 1 : (usec ? 0 : 1);
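	/* a zero usec interval also disables coalescing on this SB index */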
3713 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3714}