/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpy'ing the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	/* Move the txdata content from the old slot to the new one */
	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
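		/* The integer division below rounds full_page down to a
		 * whole multiple of gro_size.
		 */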
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
365
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000366/* Timestamp option length allowed for TPA aggregation:
367 *
368 * nop nop kind length echo val
369 */
370#define TPA_TSTAMP_OPT_LEN 12
371/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000372 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000373 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000374 * @bp: driver handle
375 * @parsing_flags: parsing flags from the START CQE
376 * @len_on_bd: total length of the first packet for the
377 * aggregation.
378 *
379 * Approximate value of the MSS for this aggregation calculated using
380 * the first packet of it.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000381 */
Eric Dumazet1191cb82012-04-27 21:39:21 +0000382static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
383 u16 len_on_bd)
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000384{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300385 /*
386 * TPA arrgregation won't have either IP options or TCP options
387 * other than timestamp or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000388 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300389 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
390
391 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
392 PRS_FLAG_OVERETH_IPV6)
393 hdrs_len += sizeof(struct ipv6hdr);
394 else /* IPv4 */
395 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000396
397
398 /* Check if there was a TCP timestamp, if there is it's will
399 * always be 12 bytes length: nop nop kind length echo val.
400 *
401 * Otherwise FW would close the aggregation.
402 */
403 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
404 hdrs_len += TPA_TSTAMP_OPT_LEN;
405
406 return len_on_bd - hdrs_len;
407}
408
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
						PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START,
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, 0);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				struct bnx2x_fastpath *fp)
{
	/* Do nothing if no IP/L4 csum validation was done */

	if (cqe->fast_path_cqe.status_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
		return;

	/* If both IP/L4 validation were done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		fp->eth_q_stats.hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

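			/* Number of SGE pages spanned by this aggregation:
			 * frag_size rounded up to whole full_page units for
			 * GRO, otherwise to whole SGE pages.
			 */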
			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			fp->eth_q_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, 0);
				if (unlikely(!skb)) {
					kfree(data);
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp);

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev,
						    txdata->txq_index));
		}
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else
		free_irq(bp->dev->irq, bp->dev);
}
1349
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001350int __devinit bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001351{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001352 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001353
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001354 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001355 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001356 bp->msix_table[0].entry);
1357 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001358
1359#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001360 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001361 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001362 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1363 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001364#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001365 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001366 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001367 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001368 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1369 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001370 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001371 }
1372
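	/* Total vectors requested: one for the slowpath, one per ETH queue,
	 * plus one for CNIC when it is present.
	 */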
Ariel Elior6383c0b2011-07-14 08:31:57 +00001373 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001374
1375 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001376
1377 /*
1378 * reconfigure number of tx/rx queues according to available
1379 * MSI-X vectors
1380 */
1381 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001382		/* how many fewer vectors will we have? */
1383 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001384
Merav Sicron51c1a582012-03-18 10:33:38 +00001385 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001386
1387 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1388
1389 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001390 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1391 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001392 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001393 /*
1394 * decrease number of queues by number of unallocated entries
1395 */
1396 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001397
Merav Sicron51c1a582012-03-18 10:33:38 +00001398 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001399 bp->num_queues);
1400 } else if (rc > 0) {
1401 /* Get by with single vector */
1402 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1403 if (rc) {
1404 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1405 rc);
1406 goto no_msix;
1407 }
1408
1409 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1410 bp->flags |= USING_SINGLE_MSIX_FLAG;
1411
1412 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001413 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001414 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001415 }
1416
1417 bp->flags |= USING_MSIX_FLAG;
1418
1419 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001420
1421no_msix:
 1422	/* fall back to INTx if not enough memory */
1423 if (rc == -ENOMEM)
1424 bp->flags |= DISABLE_MSI_FLAG;
1425
1426 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001427}
1428
1429static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1430{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001431 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001432
Dmitry Kravkovca924292011-06-14 01:33:08 +00001433 rc = request_irq(bp->msix_table[offset++].vector,
1434 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001435 bp->dev->name, bp->dev);
1436 if (rc) {
1437 BNX2X_ERR("request sp irq failed\n");
1438 return -EBUSY;
1439 }
1440
1441#ifdef BCM_CNIC
1442 offset++;
1443#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001444 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001445 struct bnx2x_fastpath *fp = &bp->fp[i];
1446 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1447 bp->dev->name, i);
1448
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001449 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001450 bnx2x_msix_fp_int, 0, fp->name, fp);
1451 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001452 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1453 bp->msix_table[offset].vector, rc);
1454 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001455 return -EBUSY;
1456 }
1457
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001458 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001459 }
1460
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001461 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001462 offset = 1 + CNIC_PRESENT;
Merav Sicron51c1a582012-03-18 10:33:38 +00001463 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001464 bp->msix_table[0].vector,
1465 0, bp->msix_table[offset].vector,
1466 i - 1, bp->msix_table[offset + i - 1].vector);
1467
1468 return 0;
1469}
1470
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001471int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001472{
1473 int rc;
1474
1475 rc = pci_enable_msi(bp->pdev);
1476 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001477 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001478 return -1;
1479 }
1480 bp->flags |= USING_MSI_FLAG;
1481
1482 return 0;
1483}
1484
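/* Request the single interrupt line used in INTx, MSI or single MSI-X mode */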
1485static int bnx2x_req_irq(struct bnx2x *bp)
1486{
1487 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001488 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001489
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001490 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001491 flags = 0;
1492 else
1493 flags = IRQF_SHARED;
1494
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001495 if (bp->flags & USING_MSIX_FLAG)
1496 irq = bp->msix_table[0].vector;
1497 else
1498 irq = bp->pdev->irq;
1499
1500 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001501}
1502
Eric Dumazet1191cb82012-04-27 21:39:21 +00001503static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001504{
1505 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001506 if (bp->flags & USING_MSIX_FLAG &&
1507 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001508 rc = bnx2x_req_msix_irqs(bp);
1509 if (rc)
1510 return rc;
1511 } else {
1512 bnx2x_ack_int(bp);
1513 rc = bnx2x_req_irq(bp);
1514 if (rc) {
1515 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1516 return rc;
1517 }
1518 if (bp->flags & USING_MSI_FLAG) {
1519 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001520 netdev_info(bp->dev, "using MSI IRQ %d\n",
1521 bp->dev->irq);
1522 }
1523 if (bp->flags & USING_MSIX_FLAG) {
1524 bp->dev->irq = bp->msix_table[0].vector;
1525 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1526 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001527 }
1528 }
1529
1530 return 0;
1531}
1532
Eric Dumazet1191cb82012-04-27 21:39:21 +00001533static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001534{
1535 int i;
1536
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001537 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001538 napi_enable(&bnx2x_fp(bp, i, napi));
1539}
1540
Eric Dumazet1191cb82012-04-27 21:39:21 +00001541static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001542{
1543 int i;
1544
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001545 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001546 napi_disable(&bnx2x_fp(bp, i, napi));
1547}
1548
1549void bnx2x_netif_start(struct bnx2x *bp)
1550{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001551 if (netif_running(bp->dev)) {
1552 bnx2x_napi_enable(bp);
1553 bnx2x_int_enable(bp);
1554 if (bp->state == BNX2X_STATE_OPEN)
1555 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001556 }
1557}
1558
1559void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1560{
1561 bnx2x_int_disable_sync(bp, disable_hw);
1562 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001563}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001564
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001565u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1566{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001567 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001568
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001569#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001570 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001571 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1572 u16 ether_type = ntohs(hdr->h_proto);
1573
1574 /* Skip VLAN tag if present */
1575 if (ether_type == ETH_P_8021Q) {
1576 struct vlan_ethhdr *vhdr =
1577 (struct vlan_ethhdr *)skb->data;
1578
1579 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1580 }
1581
1582 /* If ethertype is FCoE or FIP - use FCoE ring */
1583 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001584 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001585 }
1586#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001587 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001588 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001589}
1590
Dmitry Kravkov96305232012-04-03 18:41:30 +00001591
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001592void bnx2x_set_num_queues(struct bnx2x *bp)
1593{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001594 /* RSS queues */
1595 bp->num_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001596
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001597#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00001598 /* override in STORAGE SD modes */
1599 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001600 bp->num_queues = 1;
1601#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001602 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001603 bp->num_queues += NON_ETH_CONTEXT_USE;
Merav Sicron65565882012-06-19 07:48:26 +00001604
1605 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001606}
1607
David S. Miller823dcd22011-08-20 10:39:12 -07001608/**
1609 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1610 *
1611 * @bp: Driver handle
1612 *
 1613 * We currently support at most 16 Tx queues for each CoS, thus we will
1614 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1615 * bp->max_cos.
1616 *
1617 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1618 * index after all ETH L2 indices.
1619 *
1620 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1621 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1622 * 16..31,...) with indices that are not coupled with any real Tx queue.
1623 *
1624 * The proper configuration of skb->queue_mapping is handled by
1625 * bnx2x_select_queue() and __skb_tx_hash().
1626 *
1627 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1628 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1629 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001630static int bnx2x_set_real_num_queues(struct bnx2x *bp)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001631{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001632 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001633
Merav Sicron65565882012-06-19 07:48:26 +00001634 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1635 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001636
1637/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001638#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001639 if (!NO_FCOE(bp)) {
1640 rx += FCOE_PRESENT;
1641 tx += FCOE_PRESENT;
1642 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001643#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001644
1645 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1646 if (rc) {
1647 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1648 return rc;
1649 }
1650 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1651 if (rc) {
1652 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1653 return rc;
1654 }
1655
Merav Sicron51c1a582012-03-18 10:33:38 +00001656 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001657 tx, rx);
1658
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001659 return rc;
1660}
1661
Eric Dumazet1191cb82012-04-27 21:39:21 +00001662static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001663{
1664 int i;
1665
1666 for_each_queue(bp, i) {
1667 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001668 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001669
1670 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1671 if (IS_FCOE_IDX(i))
1672 /*
 1673			 * Although there are no IP frames expected to arrive on
 1674			 * this ring, we still want to add an
1675 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1676 * overrun attack.
1677 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001678 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001679 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001680 mtu = bp->dev->mtu;
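		/* The buffer must cover FW alignment at both ends plus the IP
		 * alignment padding and Ethernet overhead on top of the MTU.
		 */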
1681 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1682 IP_HEADER_ALIGNMENT_PADDING +
1683 ETH_OVREHEAD +
1684 mtu +
1685 BNX2X_FW_RX_ALIGN_END;
1686 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001687 }
1688}
1689
Eric Dumazet1191cb82012-04-27 21:39:21 +00001690static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001691{
1692 int i;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001693 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1694
Dmitry Kravkov96305232012-04-03 18:41:30 +00001695	/* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001696 * enabled
1697 */
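	/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the table is
	 * filled round-robin over the ETH queues, offset by the base cl_id.
	 */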
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001698 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1699 bp->rss_conf_obj.ind_table[i] =
Dmitry Kravkov96305232012-04-03 18:41:30 +00001700 bp->fp->cl_id +
1701 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001702
1703 /*
1704 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1705	 * per-port, so if explicit configuration is needed, do it only
1706 * for a PMF.
1707 *
1708 * For 57712 and newer on the other hand it's a per-function
1709 * configuration.
1710 */
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001711 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001712}
1713
Dmitry Kravkov96305232012-04-03 18:41:30 +00001714int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001715 bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001716{
Yuval Mintz3b603062012-03-18 10:33:39 +00001717 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001718 int i;
1719
 1720	/* Although RSS is meaningless when there is a single HW queue, we
1721 * still need it enabled in order to have HW Rx hash generated.
1722 *
1723 * if (!is_eth_multi(bp))
1724 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1725 */
1726
Dmitry Kravkov96305232012-04-03 18:41:30 +00001727 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001728
1729 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1730
Dmitry Kravkov96305232012-04-03 18:41:30 +00001731 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001732
Dmitry Kravkov96305232012-04-03 18:41:30 +00001733 /* RSS configuration */
1734 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1735 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1736 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1737 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001738 if (rss_obj->udp_rss_v4)
1739 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1740 if (rss_obj->udp_rss_v6)
1741 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001742
Dmitry Kravkov96305232012-04-03 18:41:30 +00001743 /* Hash bits */
1744 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001745
Merav Sicron5d317c6a2012-06-19 07:48:24 +00001746 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001747
Dmitry Kravkov96305232012-04-03 18:41:30 +00001748 if (config_hash) {
1749 /* RSS keys */
1750 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1751 params.rss_key[i] = random32();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001752
Dmitry Kravkov96305232012-04-03 18:41:30 +00001753 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001754 }
1755
1756 return bnx2x_config_rss(bp, &params);
1757}
1758
Eric Dumazet1191cb82012-04-27 21:39:21 +00001759static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001760{
Yuval Mintz3b603062012-03-18 10:33:39 +00001761 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001762
1763 /* Prepare parameters for function state transitions */
1764 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1765
1766 func_params.f_obj = &bp->func_obj;
1767 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1768
1769 func_params.params.hw_init.load_phase = load_code;
1770
1771 return bnx2x_func_state_change(bp, &func_params);
1772}
1773
1774/*
 1775 * Cleans the objects that have internal lists without sending
 1776 * ramrods. Should be run when interrupts are disabled.
1777 */
1778static void bnx2x_squeeze_objects(struct bnx2x *bp)
1779{
1780 int rc;
1781 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00001782 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001783 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1784
1785 /***************** Cleanup MACs' object first *************************/
1786
1787 /* Wait for completion of requested */
1788 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1789 /* Perform a dry cleanup */
1790 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1791
1792 /* Clean ETH primary MAC */
1793 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1794 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1795 &ramrod_flags);
1796 if (rc != 0)
1797 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1798
1799 /* Cleanup UC list */
1800 vlan_mac_flags = 0;
1801 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1802 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1803 &ramrod_flags);
1804 if (rc != 0)
1805 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1806
1807 /***************** Now clean mcast object *****************************/
1808 rparam.mcast_obj = &bp->mcast_obj;
1809 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1810
1811 /* Add a DEL command... */
1812 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1813 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00001814 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1815 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001816
1817 /* ...and wait until all pending commands are cleared */
1818 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1819 while (rc != 0) {
1820 if (rc < 0) {
1821 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1822 rc);
1823 return;
1824 }
1825
1826 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1827 }
1828}
1829
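/* On a load error: set the error state and unwind through the given label;
 * when built with BNX2X_STOP_ON_ERROR, flag a panic and return immediately.
 */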
1830#ifndef BNX2X_STOP_ON_ERROR
1831#define LOAD_ERROR_EXIT(bp, label) \
1832 do { \
1833 (bp)->state = BNX2X_STATE_ERROR; \
1834 goto label; \
1835 } while (0)
1836#else
1837#define LOAD_ERROR_EXIT(bp, label) \
1838 do { \
1839 (bp)->state = BNX2X_STATE_ERROR; \
1840 (bp)->panic = 1; \
1841 return -EBUSY; \
1842 } while (0)
1843#endif
1844
Yuval Mintz452427b2012-03-26 20:47:07 +00001845bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1846{
1847 /* build FW version dword */
1848 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1849 (BCM_5710_FW_MINOR_VERSION << 8) +
1850 (BCM_5710_FW_REVISION_VERSION << 16) +
1851 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1852
1853 /* read loaded FW from chip */
1854 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1855
1856 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1857
1858 if (loaded_fw != my_fw) {
1859 if (is_err)
1860 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1861 loaded_fw, my_fw);
1862 return false;
1863 }
1864
1865 return true;
1866}
1867
Eric Dumazet1191cb82012-04-27 21:39:21 +00001868/**
1869 * bnx2x_bz_fp - zero content of the fastpath structure.
1870 *
1871 * @bp: driver handle
1872 * @index: fastpath index to be zeroed
1873 *
1874 * Makes sure the contents of the bp->fp[index].napi is kept
1875 * intact.
1876 */
1877static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1878{
1879 struct bnx2x_fastpath *fp = &bp->fp[index];
Merav Sicron65565882012-06-19 07:48:26 +00001880 int cos;
Eric Dumazet1191cb82012-04-27 21:39:21 +00001881 struct napi_struct orig_napi = fp->napi;
1882 /* bzero bnx2x_fastpath contents */
1883 if (bp->stats_init)
1884 memset(fp, 0, sizeof(*fp));
1885 else {
1886 /* Keep Queue statistics */
1887 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1888 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1889
1890 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1891 GFP_KERNEL);
1892 if (tmp_eth_q_stats)
1893 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1894 sizeof(struct bnx2x_eth_q_stats));
1895
1896 tmp_eth_q_stats_old =
1897 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1898 GFP_KERNEL);
1899 if (tmp_eth_q_stats_old)
1900 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1901 sizeof(struct bnx2x_eth_q_stats_old));
1902
1903 memset(fp, 0, sizeof(*fp));
1904
1905 if (tmp_eth_q_stats) {
1906 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1907 sizeof(struct bnx2x_eth_q_stats));
1908 kfree(tmp_eth_q_stats);
1909 }
1910
1911 if (tmp_eth_q_stats_old) {
1912 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1913 sizeof(struct bnx2x_eth_q_stats_old));
1914 kfree(tmp_eth_q_stats_old);
1915 }
1916
1917 }
1918
1919 /* Restore the NAPI object as it has been already initialized */
1920 fp->napi = orig_napi;
1921
1922 fp->bp = bp;
1923 fp->index = index;
1924 if (IS_ETH_FP(fp))
1925 fp->max_cos = bp->max_cos;
1926 else
1927 /* Special queues support only one CoS */
1928 fp->max_cos = 1;
1929
Merav Sicron65565882012-06-19 07:48:26 +00001930 /* Init txdata pointers */
1931#ifdef BCM_CNIC
1932 if (IS_FCOE_FP(fp))
1933 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1934#endif
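	/* ETH txdata slots are laid out in per-CoS planes: e.g. with 4 ETH
	 * queues and max_cos = 3, queue 1 uses bnx2x_txq[1], [5] and [9].
	 */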
1935 if (IS_ETH_FP(fp))
1936 for_each_cos_in_tx_queue(fp, cos)
1937 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1938 BNX2X_NUM_ETH_QUEUES(bp) + index];
1939
Eric Dumazet1191cb82012-04-27 21:39:21 +00001940 /*
1941 * set the tpa flag for each queue. The tpa flag determines the queue
1942 * minimal size so it must be set prior to queue memory allocation
1943 */
1944 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1945 (bp->flags & GRO_ENABLE_FLAG &&
1946 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1947 if (bp->flags & TPA_ENABLE_FLAG)
1948 fp->mode = TPA_MODE_LRO;
1949 else if (bp->flags & GRO_ENABLE_FLAG)
1950 fp->mode = TPA_MODE_GRO;
1951
1952#ifdef BCM_CNIC
1953 /* We don't want TPA on an FCoE L2 ring */
1954 if (IS_FCOE_FP(fp))
1955 fp->disable_tpa = 1;
1956#endif
1957}
1958
1959
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001960/* must be called with rtnl_lock */
1961int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1962{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001963 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001964 u32 load_code;
1965 int i, rc;
1966
1967#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00001968 if (unlikely(bp->panic)) {
1969 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001970 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00001971 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001972#endif
1973
1974 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1975
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001976 /* Set the initial link reported state to link down */
1977 bnx2x_acquire_phy_lock(bp);
1978 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1979 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1980 &bp->last_reported_link.link_report_flags);
1981 bnx2x_release_phy_lock(bp);
1982
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001983 /* must be called before memory allocation and HW init */
1984 bnx2x_ilt_set_info(bp);
1985
Ariel Elior6383c0b2011-07-14 08:31:57 +00001986 /*
 1987	 * Zero fastpath structures while preserving invariants that are set up
 1988	 * only once: the napi struct, fp index, max_cos and the bp pointer.
Merav Sicron65565882012-06-19 07:48:26 +00001989 * Also set fp->disable_tpa and txdata_ptr.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001990 */
Merav Sicron51c1a582012-03-18 10:33:38 +00001991 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001992 for_each_queue(bp, i)
1993 bnx2x_bz_fp(bp, i);
Merav Sicron65565882012-06-19 07:48:26 +00001994 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
1995 sizeof(struct bnx2x_fp_txdata));
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001996
Ariel Elior6383c0b2011-07-14 08:31:57 +00001997
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001998 /* Set the receive queues buffer size */
1999 bnx2x_set_rx_buf_size(bp);
2000
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002001 if (bnx2x_alloc_mem(bp))
2002 return -ENOMEM;
2003
 2004	/* Since bnx2x_alloc_mem() may update
2005 * bp->num_queues, bnx2x_set_real_num_queues() should always
2006 * come after it.
2007 */
2008 rc = bnx2x_set_real_num_queues(bp);
2009 if (rc) {
2010 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002011 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002012 }
2013
Ariel Elior6383c0b2011-07-14 08:31:57 +00002014 /* configure multi cos mappings in kernel.
2015 * this configuration may be overriden by a multi class queue discipline
2016 * or by a dcbx negotiation result.
2017 */
2018 bnx2x_setup_tc(bp->dev, bp->max_cos);
2019
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002020 bnx2x_napi_enable(bp);
2021
Ariel Elior889b9af2012-01-26 06:01:51 +00002022 /* set pf load just before approaching the MCP */
2023 bnx2x_set_pf_load(bp);
2024
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002025 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002026 * Returns the type of LOAD command:
2027 * if it is the first port to be initialized
2028 * common blocks should be initialized, otherwise - not
2029 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002030 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00002031 /* init fw_seq */
2032 bp->fw_seq =
2033 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2034 DRV_MSG_SEQ_NUMBER_MASK);
2035 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2036
2037 /* Get current FW pulse sequence */
2038 bp->fw_drv_pulse_wr_seq =
2039 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2040 DRV_PULSE_SEQ_MASK);
2041 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2042
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002043 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002044 if (!load_code) {
2045 BNX2X_ERR("MCP response failure, aborting\n");
2046 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002047 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002048 }
2049 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002050 BNX2X_ERR("Driver load refused\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002051 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002052 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002053 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00002054 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2055 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002056 /* abort nic load if version mismatch */
Yuval Mintz452427b2012-03-26 20:47:07 +00002057 if (!bnx2x_test_firmware_version(bp, true)) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002058 rc = -EBUSY;
2059 LOAD_ERROR_EXIT(bp, load_error2);
2060 }
2061 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002062
2063 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002064 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002065
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002066 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2067 path, load_count[path][0], load_count[path][1],
2068 load_count[path][2]);
2069 load_count[path][0]++;
2070 load_count[path][1 + port]++;
2071 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2072 path, load_count[path][0], load_count[path][1],
2073 load_count[path][2]);
2074 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002075 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002076 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002077 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2078 else
2079 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2080 }
2081
2082 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002083 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002084 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002085 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002086 /*
2087 * We need the barrier to ensure the ordering between the
2088 * writing to bp->port.pmf here and reading it from the
2089 * bnx2x_periodic_task().
2090 */
2091 smp_mb();
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002092 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002093 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002094
Merav Sicron51c1a582012-03-18 10:33:38 +00002095 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002096
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002097 /* Init Function state controlling object */
2098 bnx2x__init_func_obj(bp);
2099
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002100 /* Initialize HW */
2101 rc = bnx2x_init_hw(bp, load_code);
2102 if (rc) {
2103 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002104 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002105 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002106 }
2107
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002108 /* Connect to IRQs */
2109 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002110 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002111 BNX2X_ERR("IRQs setup failed\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002112 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002113 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002114 }
2115
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002116 /* Setup NIC internals and enable interrupts */
2117 bnx2x_nic_init(bp, load_code);
2118
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002119 /* Init per-function objects */
2120 bnx2x_init_bp_objs(bp);
2121
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002122 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2123 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002124 (bp->common.shmem2_base)) {
2125 if (SHMEM2_HAS(bp, dcc_support))
2126 SHMEM2_WR(bp, dcc_support,
2127 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2128 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
Barak Witkowskia3348722012-04-23 03:04:46 +00002129 if (SHMEM2_HAS(bp, afex_driver_support))
2130 SHMEM2_WR(bp, afex_driver_support,
2131 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002132 }
2133
Barak Witkowskia3348722012-04-23 03:04:46 +00002134 /* Set AFEX default VLAN tag to an invalid value */
2135 bp->afex_def_vlan_tag = -1;
2136
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002137 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2138 rc = bnx2x_func_start(bp);
2139 if (rc) {
2140 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00002141 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002142 LOAD_ERROR_EXIT(bp, load_error3);
2143 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002144
2145 /* Send LOAD_DONE command to MCP */
2146 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002147 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002148 if (!load_code) {
2149 BNX2X_ERR("MCP response failure, aborting\n");
2150 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002151 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002152 }
2153 }
2154
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002155 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002156 if (rc) {
2157 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002158 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002159 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002160
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002161#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002162 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002163 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002164#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002165
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002166 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002167 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Merav Sicron51c1a582012-03-18 10:33:38 +00002168 if (rc) {
2169 BNX2X_ERR("Queue setup failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002170 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002171 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002172 }
2173
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002174 rc = bnx2x_init_rss_pf(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002175 if (rc) {
2176 BNX2X_ERR("PF RSS init failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002177 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002178 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002179
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002180 /* Now when Clients are configured we are ready to work */
2181 bp->state = BNX2X_STATE_OPEN;
2182
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002183 /* Configure a ucast MAC */
2184 rc = bnx2x_set_eth_mac(bp, true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002185 if (rc) {
2186 BNX2X_ERR("Setting Ethernet MAC failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002187 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002188 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002189
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002190 if (bp->pending_max) {
2191 bnx2x_update_max_mf_config(bp, bp->pending_max);
2192 bp->pending_max = 0;
2193 }
2194
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002195 if (bp->port.pmf)
2196 bnx2x_initial_phy_init(bp, load_mode);
2197
2198 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002199
2200 /* Initialize Rx filter. */
2201 netif_addr_lock_bh(bp->dev);
2202 bnx2x_set_rx_mode(bp->dev);
2203 netif_addr_unlock_bh(bp->dev);
2204
2205 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002206 switch (load_mode) {
2207 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002208		/* Tx queues should only be re-enabled */
2209 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002210 break;
2211
2212 case LOAD_OPEN:
2213 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002214 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002215 break;
2216
2217 case LOAD_DIAG:
Merav Sicron8970b2e2012-06-19 07:48:22 +00002218 case LOAD_LOOPBACK_EXT:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002219 bp->state = BNX2X_STATE_DIAG;
2220 break;
2221
2222 default:
2223 break;
2224 }
2225
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002226 if (bp->port.pmf)
Yuval Mintze695a2d2012-03-12 11:22:06 +00002227 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002228 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002229 bnx2x__link_status_update(bp);
2230
2231 /* start the timer */
2232 mod_timer(&bp->timer, jiffies + bp->current_interval);
2233
2234#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00002235 /* re-read iscsi info */
2236 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002237 bnx2x_setup_cnic_irq_info(bp);
2238 if (bp->state == BNX2X_STATE_OPEN)
2239 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2240#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002241
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002242	/* mark the driver as loaded in shmem2 */
2243 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2244 u32 val;
2245 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2246 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2247 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2248 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2249 }
2250
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002251 /* Wait for all pending SP commands to complete */
2252 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2253 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2254 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2255 return -EBUSY;
2256 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002257
Merav Sicron8970b2e2012-06-19 07:48:22 +00002258 if (bp->state != BNX2X_STATE_DIAG)
2259 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002260 return 0;
2261
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002262#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002263load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002264#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002265 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002266 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002267#endif
2268load_error3:
2269 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002270
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002271 /* Clean queueable objects */
2272 bnx2x_squeeze_objects(bp);
2273
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002274 /* Free SKBs, SGEs, TPA pool and driver internals */
2275 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002276 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002277 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002278
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002279 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002280 bnx2x_free_irq(bp);
2281load_error2:
2282 if (!BP_NOMCP(bp)) {
2283 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2284 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2285 }
2286
2287 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002288load_error1:
2289 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002290 /* clear pf_load status, as it was already set */
2291 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002292load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002293 bnx2x_free_mem(bp);
2294
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002295 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002296#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002297}
2298
2299/* must be called with rtnl_lock */
2300int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2301{
2302 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002303 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002304
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002305	/* mark the driver as unloaded in shmem2 */
2306 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2307 u32 val;
2308 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2309 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2310 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2311 }
2312
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002313 if ((bp->state == BNX2X_STATE_CLOSED) ||
2314 (bp->state == BNX2X_STATE_ERROR)) {
2315 /* We can get here if the driver has been unloaded
2316 * during parity error recovery and is either waiting for a
2317 * leader to complete or for other functions to unload and
2318 * then ifdown has been issued. In this case we want to
 2319		 * unload and let other functions complete a recovery
2320 * process.
2321 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002322 bp->recovery_state = BNX2X_RECOVERY_DONE;
2323 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002324 bnx2x_release_leader_lock(bp);
2325 smp_mb();
2326
Merav Sicron51c1a582012-03-18 10:33:38 +00002327 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2328 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002329 return -EINVAL;
2330 }
2331
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002332 /*
 2333	 * It's important to set bp->state to a value different from
2334 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2335 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2336 */
2337 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2338 smp_mb();
2339
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002340 /* Stop Tx */
2341 bnx2x_tx_disable(bp);
Merav Sicron65565882012-06-19 07:48:26 +00002342 netdev_reset_tc(bp->dev);
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002343
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002344#ifdef BCM_CNIC
2345 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2346#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002347
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002348 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002349
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002350 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002351
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002352 /* Set ALWAYS_ALIVE bit in shmem */
2353 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2354
2355 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002356
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002357 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002358 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002359
2360 /* Cleanup the chip if needed */
2361 if (unload_mode != UNLOAD_RECOVERY)
2362 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002363 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002364 /* Send the UNLOAD_REQUEST to the MCP */
2365 bnx2x_send_unload_req(bp, unload_mode);
2366
2367 /*
2368 * Prevent transactions to host from the functions on the
2369 * engine that doesn't reset global blocks in case of global
 2370		 * attention once global blocks are reset and gates are opened
 2371		 * (the engine whose leader will perform the recovery
2372 * last).
2373 */
2374 if (!CHIP_IS_E1x(bp))
2375 bnx2x_pf_disable(bp);
2376
2377 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002378 bnx2x_netif_stop(bp, 1);
2379
2380 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002381 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002382
2383 /* Report UNLOAD_DONE to MCP */
2384 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002385 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002386
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002387 /*
 2388	 * At this stage no more interrupts will arrive, so we may safely clean
2389 * the queueable objects here in case they failed to get cleaned so far.
2390 */
2391 bnx2x_squeeze_objects(bp);
2392
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002393 /* There should be no more pending SP commands at this stage */
2394 bp->sp_state = 0;
2395
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002396 bp->port.pmf = 0;
2397
2398 /* Free SKBs, SGEs, TPA pool and driver internals */
2399 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002400 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002401 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002402
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002403 bnx2x_free_mem(bp);
2404
2405 bp->state = BNX2X_STATE_CLOSED;
2406
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002407	/* Check if there are pending parity attentions. If there are, set
2408 * RECOVERY_IN_PROGRESS.
2409 */
2410 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2411 bnx2x_set_reset_in_progress(bp);
2412
2413 /* Set RESET_IS_GLOBAL if needed */
2414 if (global)
2415 bnx2x_set_reset_global(bp);
2416 }
2417
2418
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002419 /* The last driver must disable a "close the gate" if there is no
2420 * parity attention or "process kill" pending.
2421 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002422 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002423 bnx2x_disable_close_the_gate(bp);
2424
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002425 return 0;
2426}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002427
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002428int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2429{
2430 u16 pmcsr;
2431
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002432 /* If there is no power capability, silently succeed */
2433 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002434 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002435 return 0;
2436 }
2437
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002438 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2439
2440 switch (state) {
2441 case PCI_D0:
2442 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2443 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2444 PCI_PM_CTRL_PME_STATUS));
2445
2446 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2447 /* delay required during transition out of D3hot */
2448 msleep(20);
2449 break;
2450
2451 case PCI_D3hot:
 2452		/* If there are other clients above, don't
2453 shut down the power */
2454 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2455 return 0;
2456 /* Don't shut down the power for emulation and FPGA */
2457 if (CHIP_REV_IS_SLOW(bp))
2458 return 0;
2459
2460 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2461 pmcsr |= 3;
2462
2463 if (bp->wol)
2464 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2465
2466 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2467 pmcsr);
2468
2469 /* No more memory access after this point until
2470 * device is brought back to D0.
2471 */
2472 break;
2473
2474 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00002475 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002476 return -EINVAL;
2477 }
2478 return 0;
2479}
2480
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002481/*
2482 * net_device service functions
2483 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002484int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002485{
2486 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002487 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002488 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2489 napi);
2490 struct bnx2x *bp = fp->bp;
2491
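	/* Service Tx completions on every CoS ring of this fastpath, then Rx
	 * up to the budget; re-arm the status block interrupt only once both
	 * directions are idle.
	 */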
2492 while (1) {
2493#ifdef BNX2X_STOP_ON_ERROR
2494 if (unlikely(bp->panic)) {
2495 napi_complete(napi);
2496 return 0;
2497 }
2498#endif
2499
Ariel Elior6383c0b2011-07-14 08:31:57 +00002500 for_each_cos_in_tx_queue(fp, cos)
Merav Sicron65565882012-06-19 07:48:26 +00002501 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2502 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002503
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002504
2505 if (bnx2x_has_rx_work(fp)) {
2506 work_done += bnx2x_rx_int(fp, budget - work_done);
2507
2508 /* must not complete if we consumed full budget */
2509 if (work_done >= budget)
2510 break;
2511 }
2512
2513 /* Fall out from the NAPI loop if needed */
2514 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002515#ifdef BCM_CNIC
2516 /* No need to update SB for FCoE L2 ring as long as
2517 * it's connected to the default SB and the SB
2518 * has been updated when NAPI was scheduled.
2519 */
2520 if (IS_FCOE_FP(fp)) {
2521 napi_complete(napi);
2522 break;
2523 }
2524#endif
2525
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002526 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002527 /* bnx2x_has_rx_work() reads the status block,
2528 * thus we need to ensure that status block indices
2529 * have been actually read (bnx2x_update_fpsb_idx)
2530 * prior to this check (bnx2x_has_rx_work) so that
2531 * we won't write the "newer" value of the status block
2532 * to IGU (if there was a DMA right after
2533 * bnx2x_has_rx_work and if there is no rmb, the memory
2534 * reading (bnx2x_update_fpsb_idx) may be postponed
2535 * to right before bnx2x_ack_sb). In this case there
2536 * will never be another interrupt until there is
2537 * another update of the status block, while there
2538 * is still unhandled work.
2539 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002540 rmb();
2541
2542 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2543 napi_complete(napi);
2544 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00002545 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002546 "Update index to %d\n", fp->fp_hc_idx);
2547 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2548 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002549 IGU_INT_ENABLE, 1);
2550 break;
2551 }
2552 }
2553 }
2554
2555 return work_done;
2556}
2557
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002558/* we split the first BD into headers and data BDs
 2559 * to ease the pain of our fellow microcode engineers;
2560 * we use one mapping for both BDs
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002561 */
2562static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002563 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002564 struct sw_tx_bd *tx_buf,
2565 struct eth_tx_start_bd **tx_bd, u16 hlen,
2566 u16 bd_prod, int nbd)
2567{
2568 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2569 struct eth_tx_bd *d_tx_bd;
2570 dma_addr_t mapping;
2571 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2572
2573 /* first fix first BD */
2574 h_tx_bd->nbd = cpu_to_le16(nbd);
2575 h_tx_bd->nbytes = cpu_to_le16(hlen);
2576
Merav Sicron51c1a582012-03-18 10:33:38 +00002577 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2578 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002579
2580 /* now get a new data BD
2581 * (after the pbd) and fill it */
2582 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002583 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002584
2585 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2586 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2587
2588 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2589 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2590 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2591
2592 /* this marks the BD as one that has no individual mapping */
2593 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2594
2595 DP(NETIF_MSG_TX_QUEUED,
2596 "TSO split data size is %d (%x:%x)\n",
2597 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2598
2599 /* update tx_bd */
2600 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2601
2602 return bd_prod;
2603}
2604
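/* Correct a partial checksum by removing (fix > 0) or adding (fix < 0) the
 * contribution of |fix| bytes adjacent to the transport header, returning
 * the result byte-swapped for the parsing BD.
 */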
2605static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2606{
2607 if (fix > 0)
2608 csum = (u16) ~csum_fold(csum_sub(csum,
2609 csum_partial(t_header - fix, fix, 0)));
2610
2611 else if (fix < 0)
2612 csum = (u16) ~csum_fold(csum_add(csum,
2613 csum_partial(t_header, -fix, 0)));
2614
2615 return swab16(csum);
2616}
2617
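/* Map the skb offload state to the driver's XMIT_* flags
 * (plain, IPv4/IPv6 checksum, TCP checksum, GSO v4/v6).
 */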
2618static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2619{
2620 u32 rc;
2621
2622 if (skb->ip_summed != CHECKSUM_PARTIAL)
2623 rc = XMIT_PLAIN;
2624
2625 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002626 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002627 rc = XMIT_CSUM_V6;
2628 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2629 rc |= XMIT_CSUM_TCP;
2630
2631 } else {
2632 rc = XMIT_CSUM_V4;
2633 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2634 rc |= XMIT_CSUM_TCP;
2635 }
2636 }
2637
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002638 if (skb_is_gso_v6(skb))
2639 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2640 else if (skb_is_gso(skb))
2641 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002642
2643 return rc;
2644}
2645
2646#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 2647/* check if packet requires linearization (packet is too fragmented);
 2648   no need to check fragmentation if page size > 8K (there will be no
 2649   violation of FW restrictions) */
2650static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2651 u32 xmit_type)
2652{
2653 int to_copy = 0;
2654 int hlen = 0;
2655 int first_bd_sz = 0;
2656
2657 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2658 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2659
2660 if (xmit_type & XMIT_GSO) {
2661 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2662 /* Check if LSO packet needs to be copied:
2663 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2664 int wnd_size = MAX_FETCH_BD - 3;
2665 /* Number of windows to check */
2666 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2667 int wnd_idx = 0;
2668 int frag_idx = 0;
2669 u32 wnd_sum = 0;
2670
2671 /* Headers length */
2672 hlen = (int)(skb_transport_header(skb) - skb->data) +
2673 tcp_hdrlen(skb);
2674
2675 /* Amount of data (w/o headers) on linear part of SKB*/
2676 first_bd_sz = skb_headlen(skb) - hlen;
2677
2678 wnd_sum = first_bd_sz;
2679
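			/* FW restriction: every window of wnd_size BDs must
			 * carry at least one full MSS, otherwise the skb has
			 * to be linearized.
			 */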
2680 /* Calculate the first sum - it's special */
2681 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2682 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002683 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002684
2685 /* If there was data on linear skb data - check it */
2686 if (first_bd_sz > 0) {
2687 if (unlikely(wnd_sum < lso_mss)) {
2688 to_copy = 1;
2689 goto exit_lbl;
2690 }
2691
2692 wnd_sum -= first_bd_sz;
2693 }
2694
2695 /* Others are easier: run through the frag list and
2696 check all windows */
2697 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2698 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002699 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002700
2701 if (unlikely(wnd_sum < lso_mss)) {
2702 to_copy = 1;
2703 break;
2704 }
2705 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002706 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002707 }
2708 } else {
2709			/* in the non-LSO case a too fragmented packet should
2710			   always be linearized */
2711 to_copy = 1;
2712 }
2713 }
2714
2715exit_lbl:
2716 if (unlikely(to_copy))
2717 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00002718 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002719 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2720 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2721
2722 return to_copy;
2723}
2724#endif
2725
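/* Fill the E2 parsing data with the LSO MSS and mark IPv6 packets that
 * carry extension headers.
 */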
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002726static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2727 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002728{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002729 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2730 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2731 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002732 if ((xmit_type & XMIT_GSO_V6) &&
2733 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002734 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002735}
2736
2737/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002738 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002739 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002740 * @skb: packet skb
2741 * @pbd: parse BD
2742 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002743 */
2744static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2745 struct eth_tx_parse_bd_e1x *pbd,
2746 u32 xmit_type)
2747{
2748 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2749 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2750 pbd->tcp_flags = pbd_tcp_flags(skb);
2751
2752 if (xmit_type & XMIT_GSO_V4) {
2753 pbd->ip_id = swab16(ip_hdr(skb)->id);
2754 pbd->tcp_pseudo_csum =
2755 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2756 ip_hdr(skb)->daddr,
2757 0, IPPROTO_TCP, 0));
2758
2759 } else
2760 pbd->tcp_pseudo_csum =
2761 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2762 &ipv6_hdr(skb)->daddr,
2763 0, IPPROTO_TCP, 0));
2764
2765 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2766}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002767
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002768/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002769 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002770 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002771 * @bp: driver handle
2772 * @skb: packet skb
2773 * @parsing_data: data to be updated
2774 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002775 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002776 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002777 */
2778static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002779 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002780{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002781 *parsing_data |=
2782 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2783 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2784 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002785
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002786 if (xmit_type & XMIT_CSUM_TCP) {
2787 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2788 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2789 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002790
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002791 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2792 } else
2793 /* We support checksum offload for TCP and UDP only.
2794 * No need to pass the UDP header length - it's a constant.
2795 */
2796 return skb_transport_header(skb) +
2797 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002798}
2799
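/* Set the checksum offload flags (L4 csum, IPv4/IPv6, UDP) in the start BD */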
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002800static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2801 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2802{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002803 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2804
2805 if (xmit_type & XMIT_CSUM_V4)
2806 tx_start_bd->bd_flags.as_bitfield |=
2807 ETH_TX_BD_FLAGS_IP_CSUM;
2808 else
2809 tx_start_bd->bd_flags.as_bitfield |=
2810 ETH_TX_BD_FLAGS_IPV6;
2811
2812 if (!(xmit_type & XMIT_CSUM_TCP))
2813 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002814}
2815
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002816/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002817 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002818 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002819 * @bp: driver handle
2820 * @skb: packet skb
2821 * @pbd: parse BD to be updated
2822 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002823 */
2824static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2825 struct eth_tx_parse_bd_e1x *pbd,
2826 u32 xmit_type)
2827{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002828 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002829
2830 /* for now NS flag is not used in Linux */
2831 pbd->global_data =
2832 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2833 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2834
2835 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002836 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002837
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002838 hlen += pbd->ip_hlen_w;
2839
2840 /* We support checksum offload for TCP and UDP only */
2841 if (xmit_type & XMIT_CSUM_TCP)
2842 hlen += tcp_hdrlen(skb) / 2;
2843 else
2844 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002845
2846 pbd->total_hlen_w = cpu_to_le16(hlen);
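	/* convert back from 16-bit words to bytes for the return value */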
2847 hlen = hlen*2;
2848
2849 if (xmit_type & XMIT_CSUM_TCP) {
2850 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2851
2852 } else {
2853 s8 fix = SKB_CS_OFF(skb); /* signed! */
2854
2855 DP(NETIF_MSG_TX_QUEUED,
2856 "hlen %d fix %d csum before fix %x\n",
2857 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2858
2859 /* HW bug: fixup the CSUM */
2860 pbd->tcp_pseudo_csum =
2861 bnx2x_csum_fix(skb_transport_header(skb),
2862 SKB_CS(skb), fix);
2863
2864 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2865 pbd->tcp_pseudo_csum);
2866 }
2867
2868 return hlen;
2869}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002870
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002871/* called with netif_tx_lock
2872 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2873 * netif_wake_queue()
2874 */
2875netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2876{
2877 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002878
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002879 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002880 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002881 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002882 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002883 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002884 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002885 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002886 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002887 u16 pkt_prod, bd_prod;
Merav Sicron65565882012-06-19 07:48:26 +00002888 int nbd, txq_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002889 dma_addr_t mapping;
2890 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2891 int i;
2892 u8 hlen = 0;
2893 __le16 pkt_size = 0;
2894 struct ethhdr *eth;
2895 u8 mac_type = UNICAST_ADDRESS;
2896
2897#ifdef BNX2X_STOP_ON_ERROR
2898 if (unlikely(bp->panic))
2899 return NETDEV_TX_BUSY;
2900#endif
2901
Ariel Elior6383c0b2011-07-14 08:31:57 +00002902 txq_index = skb_get_queue_mapping(skb);
2903 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002904
Ariel Elior6383c0b2011-07-14 08:31:57 +00002905 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2906
Merav Sicron65565882012-06-19 07:48:26 +00002907 txdata = &bp->bnx2x_txq[txq_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002908
2909 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00002910 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002911 txq_index, fp_index, txdata_index); */
2912
Ariel Elior6383c0b2011-07-14 08:31:57 +00002913	/* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00002914 DP(NETIF_MSG_TX_QUEUED,
2915 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002916 txdata->cid, fp_index, txdata_index, txdata, fp); */
2917
2918 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2919 (skb_shinfo(skb)->nr_frags + 3))) {
Merav Sicron65565882012-06-19 07:48:26 +00002920 txdata->parent_fp->eth_q_stats.driver_xoff++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002921 netif_tx_stop_queue(txq);
2922 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2923 return NETDEV_TX_BUSY;
2924 }
2925
Merav Sicron51c1a582012-03-18 10:33:38 +00002926 DP(NETIF_MSG_TX_QUEUED,
2927 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002928 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002929 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2930
2931 eth = (struct ethhdr *)skb->data;
2932
2933 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2934 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2935 if (is_broadcast_ether_addr(eth->h_dest))
2936 mac_type = BROADCAST_ADDRESS;
2937 else
2938 mac_type = MULTICAST_ADDRESS;
2939 }
2940
2941#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2942 /* First, check if we need to linearize the skb (due to FW
2943 restrictions). No need to check fragmentation if page size > 8K
2944 (there will be no violation to FW restrictions) */
2945 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2946 /* Statistics of linearization */
2947 bp->lin_cnt++;
2948 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002949 DP(NETIF_MSG_TX_QUEUED,
2950 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002951 dev_kfree_skb_any(skb);
2952 return NETDEV_TX_OK;
2953 }
2954 }
2955#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002956 /* Map skb linear data for DMA */
2957 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2958 skb_headlen(skb), DMA_TO_DEVICE);
2959 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002960 DP(NETIF_MSG_TX_QUEUED,
2961 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002962 dev_kfree_skb_any(skb);
2963 return NETDEV_TX_OK;
2964 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002965 /*
2966 Please read carefully. First we use one BD which we mark as start,
2967 then we have a parsing info BD (used for TSO or xsum),
2968 and only then we have the rest of the TSO BDs.
2969 (don't forget to mark the last one as last,
2970 and to unmap only AFTER you write to the BD ...)
2971	   And above all, all pbd sizes are in words - NOT DWORDS!
2972 */
2973
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002974 /* get current pkt produced now - advance it just before sending packet
2975 * since mapping of pages may fail and cause packet to be dropped
2976 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002977 pkt_prod = txdata->tx_pkt_prod;
2978 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002979
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002980 /* get a tx_buf and first BD
2981 * tx_start_bd may be changed during SPLIT,
2982 * but first_bd will always stay first
2983 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002984 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2985 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002986 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002987
2988 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002989 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2990 mac_type);
2991
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002992 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002993 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002994
2995 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002996 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002997 tx_buf->skb = skb;
2998 tx_buf->flags = 0;
2999
3000 DP(NETIF_MSG_TX_QUEUED,
3001 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003002 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003003
Jesse Grosseab6d182010-10-20 13:56:03 +00003004 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003005 tx_start_bd->vlan_or_ethertype =
3006 cpu_to_le16(vlan_tx_tag_get(skb));
3007 tx_start_bd->bd_flags.as_bitfield |=
3008 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003009 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003010 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003011
3012 /* turn on parsing and get a BD */
3013 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003014
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00003015 if (xmit_type & XMIT_CSUM)
3016 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003017
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003018 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003019 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003020 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3021 /* Set PBD in checksum offload case */
3022 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003023 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3024 &pbd_e2_parsing_data,
3025 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003026 if (IS_MF_SI(bp)) {
3027 /*
3028 * fill in the MAC addresses in the PBD - for local
3029 * switching
3030 */
3031 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3032 &pbd_e2->src_mac_addr_mid,
3033 &pbd_e2->src_mac_addr_lo,
3034 eth->h_source);
3035 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3036 &pbd_e2->dst_mac_addr_mid,
3037 &pbd_e2->dst_mac_addr_lo,
3038 eth->h_dest);
3039 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003040 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003041 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003042 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3043 /* Set PBD in checksum offload case */
3044 if (xmit_type & XMIT_CSUM)
3045 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003046
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003047 }
3048
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003049 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003050 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3051 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003052 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003053 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3054 pkt_size = tx_start_bd->nbytes;
3055
Merav Sicron51c1a582012-03-18 10:33:38 +00003056 DP(NETIF_MSG_TX_QUEUED,
3057 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003058 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3059 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003060 tx_start_bd->bd_flags.as_bitfield,
3061 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003062
3063 if (xmit_type & XMIT_GSO) {
3064
3065 DP(NETIF_MSG_TX_QUEUED,
3066 "TSO packet len %d hlen %d total len %d tso size %d\n",
3067 skb->len, hlen, skb_headlen(skb),
3068 skb_shinfo(skb)->gso_size);
3069
3070 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3071
3072 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003073 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3074 &tx_start_bd, hlen,
3075 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003076 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003077 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3078 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003079 else
3080 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003081 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003082
3083 /* Set the PBD's parsing_data field if not zero
3084 * (for the chips newer than 57711).
3085 */
3086 if (pbd_e2_parsing_data)
3087 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3088
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003089 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3090
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003091 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003092 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3093 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3094
Eric Dumazet9e903e02011-10-18 21:00:24 +00003095 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3096 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003097 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003098 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003099
Merav Sicron51c1a582012-03-18 10:33:38 +00003100 DP(NETIF_MSG_TX_QUEUED,
3101 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003102
3103			/* we need to unmap all buffers already mapped
3104			 * for this SKB;
3105			 * first_bd->nbd needs to be properly updated
3106			 * before calling bnx2x_free_tx_pkt
3107 */
3108 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003109 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003110 TX_BD(txdata->tx_pkt_prod),
3111 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003112 return NETDEV_TX_OK;
3113 }
3114
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003115 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003116 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003117 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003118 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003119
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003120 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3121 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003122 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3123 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003124 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003125
3126 DP(NETIF_MSG_TX_QUEUED,
3127 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3128 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3129 le16_to_cpu(tx_data_bd->nbytes));
3130 }
3131
3132 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3133
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003134 /* update with actual num BDs */
3135 first_bd->nbd = cpu_to_le16(nbd);
3136
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003137 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3138
3139 /* now send a tx doorbell, counting the next BD
3140 * if the packet contains or ends with it
3141 */
3142 if (TX_BD_POFF(bd_prod) < nbd)
3143 nbd++;
3144
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003145 /* total_pkt_bytes should be set on the first data BD if
3146 * it's not an LSO packet and there is more than one
3147 * data BD. In this case pkt_size is limited by an MTU value.
3148 * However we prefer to set it for an LSO packet (while we don't
3149	 * have to) in order to save some CPU cycles in a non-LSO
3150	 * case, which we care much more about.
3151 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003152 if (total_pkt_bd != NULL)
3153 total_pkt_bd->total_pkt_bytes = pkt_size;
3154
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003155 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003156 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003157 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003158 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3159 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3160 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3161 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003162 if (pbd_e2)
3163 DP(NETIF_MSG_TX_QUEUED,
3164 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3165 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3166 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3167 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3168 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003169 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3170
Tom Herbert2df1a702011-11-28 16:33:37 +00003171 netdev_tx_sent_queue(txq, skb->len);
3172
Willem de Bruijn8373c572012-04-27 09:04:06 +00003173 skb_tx_timestamp(skb);
3174
Ariel Elior6383c0b2011-07-14 08:31:57 +00003175 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003176 /*
3177 * Make sure that the BD data is updated before updating the producer
3178 * since FW might read the BD right after the producer is updated.
3179 * This is only applicable for weak-ordered memory model archs such
3180	 * as IA-64. The following barrier is also mandatory since the FW
3181	 * assumes packets must have BDs.
3182 */
3183 wmb();
3184
Ariel Elior6383c0b2011-07-14 08:31:57 +00003185 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003186 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003187
Ariel Elior6383c0b2011-07-14 08:31:57 +00003188 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003189
3190 mmiowb();
3191
Ariel Elior6383c0b2011-07-14 08:31:57 +00003192 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003193
Eric Dumazetbc147862012-06-13 09:45:16 +00003194 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003195 netif_tx_stop_queue(txq);
3196
3197 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3198 * ordering of set_bit() in netif_tx_stop_queue() and read of
3199 * fp->bd_tx_cons */
3200 smp_mb();
3201
Merav Sicron65565882012-06-19 07:48:26 +00003202 txdata->parent_fp->eth_q_stats.driver_xoff++;
Eric Dumazetbc147862012-06-13 09:45:16 +00003203 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003204 netif_tx_wake_queue(txq);
3205 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003206 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003207
3208 return NETDEV_TX_OK;
3209}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003210
Ariel Elior6383c0b2011-07-14 08:31:57 +00003211/**
3212 * bnx2x_setup_tc - routine to configure net_device for multi tc
3213 *
3214 * @dev:	net device to configure
3215 * @num_tc:	number of traffic classes to enable
3216 *
3217 * callback connected to the ndo_setup_tc function pointer
3218 */
3219int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3220{
3221 int cos, prio, count, offset;
3222 struct bnx2x *bp = netdev_priv(dev);
3223
3224 /* setup tc must be called under rtnl lock */
3225 ASSERT_RTNL();
3226
3227 /* no traffic classes requested. aborting */
3228 if (!num_tc) {
3229 netdev_reset_tc(dev);
3230 return 0;
3231 }
3232
3233 /* requested to support too many traffic classes */
3234 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003235 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3236 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003237 return -EINVAL;
3238 }
3239
3240 /* declare amount of supported traffic classes */
3241 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003242 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003243 return -EINVAL;
3244 }
3245
3246 /* configure priority to traffic class mapping */
3247 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3248 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003249 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3250 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003251 prio, bp->prio_to_cos[prio]);
3252 }
3253
3254
3255 /* Use this configuration to diffrentiate tc0 from other COSes
3256 This can be used for ets or pfc, and save the effort of setting
3257 up a multio class queue disc or negotiating DCBX with a switch
3258 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003259 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003260 for (prio = 1; prio < 16; prio++) {
3261 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003262 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003263 } */
3264
3265 /* configure traffic class to transmission queue mapping */
3266 for (cos = 0; cos < bp->max_cos; cos++) {
3267 count = BNX2X_NUM_ETH_QUEUES(bp);
Merav Sicron65565882012-06-19 07:48:26 +00003268 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003269 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003270 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3271 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003272 cos, offset, count);
3273 }
3274
3275 return 0;
3276}
3277
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003278/* called with rtnl_lock */
3279int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3280{
3281 struct sockaddr *addr = p;
3282 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003283 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003284
Merav Sicron51c1a582012-03-18 10:33:38 +00003285 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3286 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003287 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003288 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003289
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003290#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003291 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3292 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003293 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003294 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003295 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003296#endif
3297
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003298 if (netif_running(dev)) {
3299 rc = bnx2x_set_eth_mac(bp, false);
3300 if (rc)
3301 return rc;
3302 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003303
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003304 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003305 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3306
3307 if (netif_running(dev))
3308 rc = bnx2x_set_eth_mac(bp, true);
3309
3310 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003311}
3312
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003313static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3314{
3315 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3316 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003317 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003318
3319 /* Common */
3320#ifdef BCM_CNIC
3321 if (IS_FCOE_IDX(fp_index)) {
3322 memset(sb, 0, sizeof(union host_hc_status_block));
3323 fp->status_blk_mapping = 0;
3324
3325 } else {
3326#endif
3327 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003328 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003329 BNX2X_PCI_FREE(sb->e2_sb,
3330 bnx2x_fp(bp, fp_index,
3331 status_blk_mapping),
3332 sizeof(struct host_hc_status_block_e2));
3333 else
3334 BNX2X_PCI_FREE(sb->e1x_sb,
3335 bnx2x_fp(bp, fp_index,
3336 status_blk_mapping),
3337 sizeof(struct host_hc_status_block_e1x));
3338#ifdef BCM_CNIC
3339 }
3340#endif
3341 /* Rx */
3342 if (!skip_rx_queue(bp, fp_index)) {
3343 bnx2x_free_rx_bds(fp);
3344
3345 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3346 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3347 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3348 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3349 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3350
3351 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3352 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3353 sizeof(struct eth_fast_path_rx_cqe) *
3354 NUM_RCQ_BD);
3355
3356 /* SGE ring */
3357 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3358 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3359 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3360 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3361 }
3362
3363 /* Tx */
3364 if (!skip_tx_queue(bp, fp_index)) {
3365 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003366 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003367 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003368
Merav Sicron51c1a582012-03-18 10:33:38 +00003369 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003370 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003371 fp_index, cos, txdata->cid);
3372
3373 BNX2X_FREE(txdata->tx_buf_ring);
3374 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3375 txdata->tx_desc_mapping,
3376 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3377 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003378 }
3379 /* end of fastpath */
3380}
3381
3382void bnx2x_free_fp_mem(struct bnx2x *bp)
3383{
3384 int i;
3385 for_each_queue(bp, i)
3386 bnx2x_free_fp_mem_at(bp, i);
3387}
3388
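/* Cache pointers to the status block's index_values and running_index
 * arrays (E2 or E1x layout) in the fastpath structure.
 */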
Eric Dumazet1191cb82012-04-27 21:39:21 +00003389static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003390{
3391 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003392 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003393 bnx2x_fp(bp, index, sb_index_values) =
3394 (__le16 *)status_blk.e2_sb->sb.index_values;
3395 bnx2x_fp(bp, index, sb_running_index) =
3396 (__le16 *)status_blk.e2_sb->sb.running_index;
3397 } else {
3398 bnx2x_fp(bp, index, sb_index_values) =
3399 (__le16 *)status_blk.e1x_sb->sb.index_values;
3400 bnx2x_fp(bp, index, sb_running_index) =
3401 (__le16 *)status_blk.e1x_sb->sb.running_index;
3402 }
3403}
3404
Eric Dumazet1191cb82012-04-27 21:39:21 +00003405/* Returns the number of actually allocated BDs */
3406static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3407 int rx_ring_size)
3408{
3409 struct bnx2x *bp = fp->bp;
3410 u16 ring_prod, cqe_ring_prod;
3411 int i, failure_cnt = 0;
3412
3413 fp->rx_comp_cons = 0;
3414 cqe_ring_prod = ring_prod = 0;
3415
3416	/* This routine is called only during init, so
3417 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3418 */
3419 for (i = 0; i < rx_ring_size; i++) {
3420 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3421 failure_cnt++;
3422 continue;
3423 }
3424 ring_prod = NEXT_RX_IDX(ring_prod);
3425 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3426 WARN_ON(ring_prod <= (i - failure_cnt));
3427 }
3428
3429 if (failure_cnt)
3430 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3431 i - failure_cnt, fp->index);
3432
3433 fp->rx_bd_prod = ring_prod;
3434 /* Limit the CQE producer by the CQE ring size */
3435 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3436 cqe_ring_prod);
3437 fp->rx_pkt = fp->rx_calls = 0;
3438
3439 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3440
3441 return i - failure_cnt;
3442}
3443
3444static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3445{
3446 int i;
3447
3448 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3449 struct eth_rx_cqe_next_page *nextpg;
3450
3451 nextpg = (struct eth_rx_cqe_next_page *)
3452 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3453 nextpg->addr_hi =
3454 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3455 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3456 nextpg->addr_lo =
3457 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3458 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3459 }
3460}
3461
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003462static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3463{
3464 union host_hc_status_block *sb;
3465 struct bnx2x_fastpath *fp = &bp->fp[index];
3466 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003467 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003468 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003469
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003470#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003471 if (!bp->rx_ring_size &&
3472 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003473 rx_ring_size = MIN_RX_SIZE_NONTPA;
3474 bp->rx_ring_size = rx_ring_size;
3475 } else
3476#endif
David S. Miller8decf862011-09-22 03:23:13 -04003477 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003478 u32 cfg = SHMEM_RD(bp,
3479 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003480
David S. Miller8decf862011-09-22 03:23:13 -04003481 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3482
Mintz Yuvald760fc32012-02-15 02:10:28 +00003483	/* Decrease ring size for 1G functions */
3484 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3485 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3486 rx_ring_size /= 10;
3487
David S. Miller8decf862011-09-22 03:23:13 -04003488 /* allocate at least number of buffers required by FW */
3489 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3490 MIN_RX_SIZE_TPA, rx_ring_size);
3491
3492 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003493 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003494 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003495
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003496 /* Common */
3497 sb = &bnx2x_fp(bp, index, status_blk);
3498#ifdef BCM_CNIC
3499 if (!IS_FCOE_IDX(index)) {
3500#endif
3501 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003502 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003503 BNX2X_PCI_ALLOC(sb->e2_sb,
3504 &bnx2x_fp(bp, index, status_blk_mapping),
3505 sizeof(struct host_hc_status_block_e2));
3506 else
3507 BNX2X_PCI_ALLOC(sb->e1x_sb,
3508 &bnx2x_fp(bp, index, status_blk_mapping),
3509 sizeof(struct host_hc_status_block_e1x));
3510#ifdef BCM_CNIC
3511 }
3512#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003513
3514 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3515 * set shortcuts for it.
3516 */
3517 if (!IS_FCOE_IDX(index))
3518 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003519
3520 /* Tx */
3521 if (!skip_tx_queue(bp, index)) {
3522 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003523 for_each_cos_in_tx_queue(fp, cos) {
Merav Sicron65565882012-06-19 07:48:26 +00003524 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003525
Merav Sicron51c1a582012-03-18 10:33:38 +00003526 DP(NETIF_MSG_IFUP,
3527 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003528 index, cos);
3529
3530 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003531 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003532 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3533 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003534 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003535 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003536 }
3537
3538 /* Rx */
3539 if (!skip_rx_queue(bp, index)) {
3540 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3541 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3542 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3543 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3544 &bnx2x_fp(bp, index, rx_desc_mapping),
3545 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3546
3547 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3548 &bnx2x_fp(bp, index, rx_comp_mapping),
3549 sizeof(struct eth_fast_path_rx_cqe) *
3550 NUM_RCQ_BD);
3551
3552 /* SGE ring */
3553 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3554 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3555 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3556 &bnx2x_fp(bp, index, rx_sge_mapping),
3557 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3558 /* RX BD ring */
3559 bnx2x_set_next_page_rx_bd(fp);
3560
3561 /* CQ ring */
3562 bnx2x_set_next_page_rx_cq(fp);
3563
3564 /* BDs */
3565 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3566 if (ring_size < rx_ring_size)
3567 goto alloc_mem_err;
3568 }
3569
3570 return 0;
3571
3572/* handles low memory cases */
3573alloc_mem_err:
3574 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3575 index, ring_size);
3576	/* FW will drop all packets if the queue is not big enough.
3577	 * In these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003578 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003579 */
3580 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003581 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003582 /* release memory allocated for this queue */
3583 bnx2x_free_fp_mem_at(bp, index);
3584 return -ENOMEM;
3585 }
3586 return 0;
3587}
3588
3589int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3590{
3591 int i;
3592
3593 /**
3594 * 1. Allocate FP for leading - fatal if error
3595 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003596 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3597 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003598 */
3599
3600 /* leading */
3601 if (bnx2x_alloc_fp_mem_at(bp, 0))
3602 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003603
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003604#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003605 if (!NO_FCOE(bp))
3606 /* FCoE */
Merav Sicron65565882012-06-19 07:48:26 +00003607 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003608 /* we will fail load process instead of mark
3609 * NO_FCOE_FLAG
3610 */
3611 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003612#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003613
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003614 /* RSS */
3615 for_each_nondefault_eth_queue(bp, i)
3616 if (bnx2x_alloc_fp_mem_at(bp, i))
3617 break;
3618
3619 /* handle memory failures */
3620 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3621 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3622
3623 WARN_ON(delta < 0);
3624#ifdef BCM_CNIC
3625 /**
3626 * move non eth FPs next to last eth FP
3627 * must be done in that order
3628 * FCOE_IDX < FWD_IDX < OOO_IDX
3629 */
3630
Ariel Elior6383c0b2011-07-14 08:31:57 +00003631	/* move FCoE fp even if NO_FCOE_FLAG is on */
Merav Sicron65565882012-06-19 07:48:26 +00003632 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003633#endif
3634 bp->num_queues -= delta;
3635 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3636 bp->num_queues + delta, bp->num_queues);
3637 }
3638
3639 return 0;
3640}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003641
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003642void bnx2x_free_mem_bp(struct bnx2x *bp)
3643{
3644 kfree(bp->fp);
Merav Sicron65565882012-06-19 07:48:26 +00003645 kfree(bp->bnx2x_txq);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003646 kfree(bp->msix_table);
3647 kfree(bp->ilt);
3648}
3649
3650int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3651{
3652 struct bnx2x_fastpath *fp;
3653 struct msix_entry *tbl;
3654 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003655 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003656
Ariel Elior6383c0b2011-07-14 08:31:57 +00003657 /*
3658 * The biggest MSI-X table we might need is as a maximum number of fast
3659 * path IGU SBs plus default SB (for PF).
3660 */
3661 msix_table_size = bp->igu_sb_cnt + 1;
3662
3663 /* fp array: RSS plus CNIC related L2 queues */
Thomas Meyer01e23742011-11-29 11:08:00 +00003664 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003665 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003666 if (!fp)
3667 goto alloc_err;
3668 bp->fp = fp;
3669
Merav Sicron65565882012-06-19 07:48:26 +00003670 /* Allocate memory for the transmission queues array */
3671 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
3672#ifdef BCM_CNIC
3673 bp->bnx2x_txq_size++;
3674#endif
3675 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
3676 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
3677 if (!bp->bnx2x_txq)
3678 goto alloc_err;
3679
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003680 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003681 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003682 if (!tbl)
3683 goto alloc_err;
3684 bp->msix_table = tbl;
3685
3686 /* ilt */
3687 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3688 if (!ilt)
3689 goto alloc_err;
3690 bp->ilt = ilt;
3691
3692 return 0;
3693alloc_err:
3694 bnx2x_free_mem_bp(bp);
3695 return -ENOMEM;
3696
3697}
3698
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003699int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003700{
3701 struct bnx2x *bp = netdev_priv(dev);
3702
3703 if (unlikely(!netif_running(dev)))
3704 return 0;
3705
3706 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3707 return bnx2x_nic_load(bp, LOAD_NORMAL);
3708}
3709
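/* Return the index of the currently active PHY: the internal PHY on a
 * single-PHY board, otherwise EXT_PHY1/EXT_PHY2 based on the link state or
 * the configured PHY selection.
 */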
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003710int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3711{
3712 u32 sel_phy_idx = 0;
3713 if (bp->link_params.num_phys <= 1)
3714 return INT_PHY;
3715
3716 if (bp->link_vars.link_up) {
3717 sel_phy_idx = EXT_PHY1;
3718 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3719 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3720 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3721 sel_phy_idx = EXT_PHY2;
3722 } else {
3723
3724 switch (bnx2x_phy_selection(&bp->link_params)) {
3725 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3726 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3727 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3728 sel_phy_idx = EXT_PHY1;
3729 break;
3730 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3731 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3732 sel_phy_idx = EXT_PHY2;
3733 break;
3734 }
3735 }
3736
3737 return sel_phy_idx;
3738
3739}
3740int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3741{
3742 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3743 /*
3744	 * The selected active PHY is always the one after swapping (in case PHY
3745 * swapping is enabled). So when swapping is enabled, we need to reverse
3746 * the configuration
3747 */
3748
3749 if (bp->link_params.multi_phy_config &
3750 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3751 if (sel_phy_idx == EXT_PHY1)
3752 sel_phy_idx = EXT_PHY2;
3753 else if (sel_phy_idx == EXT_PHY2)
3754 sel_phy_idx = EXT_PHY1;
3755 }
3756 return LINK_CONFIG_IDX(sel_phy_idx);
3757}
3758
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003759#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3760int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3761{
3762 struct bnx2x *bp = netdev_priv(dev);
3763 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3764
3765 switch (type) {
3766 case NETDEV_FCOE_WWNN:
3767 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3768 cp->fcoe_wwn_node_name_lo);
3769 break;
3770 case NETDEV_FCOE_WWPN:
3771 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3772 cp->fcoe_wwn_port_name_lo);
3773 break;
3774 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003775 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003776 return -EINVAL;
3777 }
3778
3779 return 0;
3780}
3781#endif
3782
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003783/* called with rtnl_lock */
3784int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3785{
3786 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003787
3788 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003789 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003790 return -EAGAIN;
3791 }
3792
3793 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00003794 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3795 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003796 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003797 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003798
3799 /* This does not race with packet allocation
3800 * because the actual alloc size is
3801 * only updated as part of load
3802 */
3803 dev->mtu = new_mtu;
3804
Michał Mirosław66371c42011-04-12 09:38:23 +00003805 return bnx2x_reload_if_running(dev);
3806}
3807
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003808netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003809 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003810{
3811 struct bnx2x *bp = netdev_priv(dev);
3812
3813 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003814 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003815 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003816 features &= ~NETIF_F_GRO;
3817 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003818
3819 return features;
3820}
3821
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003822int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003823{
3824 struct bnx2x *bp = netdev_priv(dev);
3825 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003826 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003827
3828 if (features & NETIF_F_LRO)
3829 flags |= TPA_ENABLE_FLAG;
3830 else
3831 flags &= ~TPA_ENABLE_FLAG;
3832
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003833 if (features & NETIF_F_GRO)
3834 flags |= GRO_ENABLE_FLAG;
3835 else
3836 flags &= ~GRO_ENABLE_FLAG;
3837
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003838 if (features & NETIF_F_LOOPBACK) {
3839 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3840 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3841 bnx2x_reload = true;
3842 }
3843 } else {
3844 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3845 bp->link_params.loopback_mode = LOOPBACK_NONE;
3846 bnx2x_reload = true;
3847 }
3848 }
3849
Michał Mirosław66371c42011-04-12 09:38:23 +00003850 if (flags ^ bp->flags) {
3851 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003852 bnx2x_reload = true;
3853 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003854
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003855 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003856 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3857 return bnx2x_reload_if_running(dev);
3858 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003859 }
3860
Michał Mirosław66371c42011-04-12 09:38:23 +00003861 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003862}
3863
3864void bnx2x_tx_timeout(struct net_device *dev)
3865{
3866 struct bnx2x *bp = netdev_priv(dev);
3867
3868#ifdef BNX2X_STOP_ON_ERROR
3869 if (!bp->panic)
3870 bnx2x_panic();
3871#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003872
3873 smp_mb__before_clear_bit();
3874 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3875 smp_mb__after_clear_bit();
3876
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003877 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003878 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003879}
3880
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003881int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3882{
3883 struct net_device *dev = pci_get_drvdata(pdev);
3884 struct bnx2x *bp;
3885
3886 if (!dev) {
3887 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3888 return -ENODEV;
3889 }
3890 bp = netdev_priv(dev);
3891
3892 rtnl_lock();
3893
3894 pci_save_state(pdev);
3895
3896 if (!netif_running(dev)) {
3897 rtnl_unlock();
3898 return 0;
3899 }
3900
3901 netif_device_detach(dev);
3902
3903 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3904
3905 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3906
3907 rtnl_unlock();
3908
3909 return 0;
3910}
3911
3912int bnx2x_resume(struct pci_dev *pdev)
3913{
3914 struct net_device *dev = pci_get_drvdata(pdev);
3915 struct bnx2x *bp;
3916 int rc;
3917
3918 if (!dev) {
3919 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3920 return -ENODEV;
3921 }
3922 bp = netdev_priv(dev);
3923
3924 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003925 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003926 return -EAGAIN;
3927 }
3928
3929 rtnl_lock();
3930
3931 pci_restore_state(pdev);
3932
3933 if (!netif_running(dev)) {
3934 rtnl_unlock();
3935 return 0;
3936 }
3937
3938 bnx2x_set_power_state(bp, PCI_D0);
3939 netif_device_attach(dev);
3940
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003941 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3942
3943 rtnl_unlock();
3944
3945 return rc;
3946}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003947
3948
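/* Write the CDU validation values into the ustorm and xstorm sections of
 * the ethernet context for the given CID.
 */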
3949void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3950 u32 cid)
3951{
3952 /* ustorm cxt validation */
3953 cxt->ustorm_ag_context.cdu_usage =
3954 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3955 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3956 /* xcontext validation */
3957 cxt->xstorm_ag_context.cdu_reserved =
3958 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3959 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3960}
3961
Eric Dumazet1191cb82012-04-27 21:39:21 +00003962static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3963 u8 fw_sb_id, u8 sb_index,
3964 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003965{
3966
3967 u32 addr = BAR_CSTRORM_INTMEM +
3968 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3969 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00003970 DP(NETIF_MSG_IFUP,
3971 "port %x fw_sb_id %d sb_index %d ticks %d\n",
3972 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003973}
3974
Eric Dumazet1191cb82012-04-27 21:39:21 +00003975static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3976 u16 fw_sb_id, u8 sb_index,
3977 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003978{
3979 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3980 u32 addr = BAR_CSTRORM_INTMEM +
3981 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3982 u16 flags = REG_RD16(bp, addr);
3983 /* clear and set */
3984 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3985 flags |= enable_flag;
3986 REG_WR16(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00003987 DP(NETIF_MSG_IFUP,
3988 "port %x fw_sb_id %d sb_index %d disable %d\n",
3989 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003990}
3991
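/* Convert the usec value into BNX2X_BTR ticks and program both the timeout
 * and the enable/disable flag of the given status block index.
 */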
3992void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3993 u8 sb_index, u8 disable, u16 usec)
3994{
3995 int port = BP_PORT(bp);
3996 u8 ticks = usec / BNX2X_BTR;
3997
3998 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3999
4000 disable = disable ? 1 : (usec ? 0 : 1);
4001 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4002}