/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then memcpy'ing the entire
 * source onto the target.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

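/**
 * bnx2x_tx_int - process Tx completions for a single Tx queue
 *
 * @bp:		driver handle
 * @txdata:	Tx queue data (BD ring, consumer indices, netdev queue index)
 *
 * Walks the packets completed by the chip (hw vs. sw consumer), frees
 * their buffers via bnx2x_free_tx_pkt(), reports the completed work to
 * BQL and re-wakes the netdev Tx queue when it was stopped and enough
 * BDs have become available again.
 */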
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

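/**
 * bnx2x_update_sge_prod - advance the SGE producer after a TPA aggregation
 *
 * @fp:		fastpath structure
 * @sge_len:	number of SGE entries consumed by the aggregation
 * @cqe:	end-aggregation CQE holding the SGE indices
 *
 * Clears the mask bits of the SGEs consumed by the aggregation, updates
 * fp->last_max_sge and pushes fp->rx_sge_prod forward over mask elements
 * that have been completely consumed (re-arming them on the way).
 */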
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Return the Toeplitz hash value from the CQE (calculated by HW),
 * or 0 if RX hashing is disabled or no hash is present.
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}

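/**
 * bnx2x_tpa_start - begin a new TPA aggregation on @queue
 *
 * @fp:		fastpath structure
 * @queue:	aggregation queue (bin) index
 * @cons:	Rx BD consumer index holding the first packet
 * @prod:	Rx BD producer index to refill
 * @cqe:	fastpath CQE that started the aggregation
 *
 * Moves the buffer at @cons into the aggregation bin, maps the bin's
 * spare buffer into the producer slot and records the aggregation
 * metadata (parsing flags, VLAN tag, len_on_bd, placement offset, RSS
 * hash and, in GRO mode, the segment size). On a DMA mapping failure
 * the bin is marked BNX2X_TPA_ERROR so the aggregation is dropped when
 * TPA_STOP arrives.
 */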
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

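/**
 * bnx2x_alloc_rx_sge - allocate and map an SGE page cluster
 *
 * @bp:		driver handle
 * @fp:		fastpath structure
 * @index:	SGE ring index to fill
 *
 * Allocates PAGES_PER_SGE pages, DMA-maps them and publishes the
 * mapping in the SGE ring at @index. Returns 0 on success or -ENOMEM.
 */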
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

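/**
 * bnx2x_fill_frag_skb - attach the SGE pages of an aggregation to an skb
 *
 * @bp:		driver handle
 * @fp:		fastpath structure
 * @tpa_info:	aggregation bin being closed
 * @pages:	number of pages spanned by the aggregated payload
 * @skb:	skb built around the first buffer of the aggregation
 * @cqe:	end-aggregation CQE with the SGL
 * @cqe_idx:	CQE index (used for error reporting)
 *
 * Sets gso_size/gso_type so the aggregated skb can be forwarded, then
 * walks the SGL, replacing each consumed page with a freshly allocated
 * one and adding it to the skb as a fragment (split into gro_size
 * chunks in GRO mode). Returns 0, or an error if a replacement page
 * cannot be allocated, in which case the packet is dropped.
 */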
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
			     PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

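/**
 * bnx2x_tpa_stop - close an aggregation and pass it to the stack
 *
 * @bp:		driver handle
 * @fp:		fastpath structure
 * @tpa_info:	aggregation bin being closed
 * @pages:	number of SGE pages used by the aggregated payload
 * @cqe:	end-aggregation CQE
 * @cqe_idx:	CQE index
 *
 * Allocates a replacement buffer for the bin, builds an skb around the
 * aggregated first buffer, appends the SGE pages through
 * bnx2x_fill_frag_skb() and hands the result to napi_gro_receive().
 * On any allocation failure (or if the bin is in ERROR state) the
 * aggregation is dropped and the buffer stays in the bin.
 */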
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, 0);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}

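/**
 * bnx2x_alloc_rx_data - allocate and map a new Rx data buffer
 *
 * @bp:		driver handle
 * @fp:		fastpath structure
 * @index:	Rx BD ring index to fill
 *
 * kmalloc()s a buffer of rx_buf_size (+ NET_SKB_PAD), DMA-maps it and
 * publishes the mapping in the Rx BD at @index. Returns 0 or -ENOMEM.
 */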
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

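/**
 * bnx2x_rx_int - Rx work of the NAPI poll loop for one fastpath queue
 *
 * @fp:		fastpath structure
 * @budget:	maximum number of packets to process
 *
 * Walks the Rx completion queue: dispatches slowpath CQEs, handles TPA
 * start/stop CQEs, and for regular packets either copies small frames
 * into a fresh skb or builds an skb around the existing buffer after
 * allocating a replacement. Checksum, VLAN and RSS hash information is
 * applied before the skb is passed to napi_gro_receive(). Finally the
 * new producer values are written back to the chip.
 */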
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			fp->eth_q_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, 0);
				if (unlikely(!skb)) {
					kfree(data);
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM) {

			if (likely(BNX2X_RX_CSUM_OK(cqe)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				fp->eth_q_stats.hw_csum_err++;
		}

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

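/* MSI-X fastpath interrupt handler: ack the status block with interrupts
 * disabled, prefetch the Rx/Tx consumer indices and schedule the queue's
 * NAPI context, which performs the actual Rx/Tx work.
 */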
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current line speed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

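/**
 * bnx2x_init_rx_rings - initialize the Rx rings of all Rx queues
 *
 * @bp:		driver handle
 *
 * Allocates the TPA pool and the SGE ring for every Rx queue (TPA is
 * disabled on a queue if an allocation fails) and then activates the
 * BD rings by publishing the initial producers to the chip.
 */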
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

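/* Free all packets still pending on every Tx queue and reset the
 * corresponding BQL queue state.
 */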
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
				netdev_get_tx_queue(bp->dev, txdata->txq_index));
		}
	}
}

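/* Unmap and free every Rx data buffer of a fastpath Rx BD ring */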
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

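/* Update the MAX bandwidth field of the MF configuration and, if the
 * value changed, push it to the FW via the DRV_MSG_CODE_SET_MF_BW command.
 */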
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else
		free_irq(bp->dev->irq, bp->dev);
}

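/**
 * bnx2x_enable_msix - build the MSI-X table and enable MSI-X
 *
 * @bp:		driver handle
 *
 * Fills the MSI-X table with one slowpath vector, a CNIC vector when
 * compiled in, and one vector per ETH queue, then tries to enable
 * MSI-X. If fewer vectors are granted, the number of queues is reduced
 * or a single vector is used; on failure an error is returned (and
 * DISABLE_MSI_FLAG is set for -ENOMEM so the caller falls back to INTx).
 */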
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001313int __devinit bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001314{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001315 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001316
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001317 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001318 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001319 bp->msix_table[0].entry);
1320 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001321
1322#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001323 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001324 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001325 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1326 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001327#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001328 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001329 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001330 bp->msix_table[msix_vec].entry = msix_vec;
Merav Sicron51c1a582012-03-18 10:33:38 +00001331 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1332 msix_vec, msix_vec, i);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001333 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001334 }
1335
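	/* Total MSI-X request: one slowpath vector, one optional CNIC vector
	 * and one per ETH queue - e.g. with 8 ETH queues and CNIC built in
	 * this asks for 1 + 1 + 8 = 10 table entries.
	 */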
Ariel Elior6383c0b2011-07-14 08:31:57 +00001336 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001337
1338 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001339
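	/* pci_enable_msix() returns 0 on success, a positive count of the
	 * vectors that could be allocated when the request is too large,
	 * or a negative errno on failure - hence the three-way check below.
	 */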
1340 /*
1341 * reconfigure number of tx/rx queues according to available
1342 * MSI-X vectors
1343 */
1344 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001345		/* how many fewer vectors will we have? */
1346 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001347
Merav Sicron51c1a582012-03-18 10:33:38 +00001348 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001349
1350 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1351
1352 if (rc) {
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001353 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1354 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001355 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001356 /*
1357 * decrease number of queues by number of unallocated entries
1358 */
1359 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001360
Merav Sicron51c1a582012-03-18 10:33:38 +00001361 BNX2X_DEV_INFO("New queue configuration set: %d\n",
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001362 bp->num_queues);
1363 } else if (rc > 0) {
1364 /* Get by with single vector */
1365 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1366 if (rc) {
1367 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1368 rc);
1369 goto no_msix;
1370 }
1371
1372 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1373 bp->flags |= USING_SINGLE_MSIX_FLAG;
1374
1375 } else if (rc < 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001376 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001377 goto no_msix;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001378 }
1379
1380 bp->flags |= USING_MSIX_FLAG;
1381
1382 return 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001383
1384no_msix:
 1385	/* fall back to INTx if not enough memory */
1386 if (rc == -ENOMEM)
1387 bp->flags |= DISABLE_MSI_FLAG;
1388
1389 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001390}
1391
1392static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1393{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001394 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001395
Dmitry Kravkovca924292011-06-14 01:33:08 +00001396 rc = request_irq(bp->msix_table[offset++].vector,
1397 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001398 bp->dev->name, bp->dev);
1399 if (rc) {
1400 BNX2X_ERR("request sp irq failed\n");
1401 return -EBUSY;
1402 }
1403
1404#ifdef BCM_CNIC
1405 offset++;
1406#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001407 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001408 struct bnx2x_fastpath *fp = &bp->fp[i];
1409 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1410 bp->dev->name, i);
1411
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001412 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001413 bnx2x_msix_fp_int, 0, fp->name, fp);
1414 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001415 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1416 bp->msix_table[offset].vector, rc);
1417 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001418 return -EBUSY;
1419 }
1420
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001421 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001422 }
1423
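	/* ETH fastpath vectors follow the slowpath vector and the optional
	 * CNIC vector in msix_table, hence the "1 + CNIC_PRESENT" offset.
	 */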
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001424 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001425 offset = 1 + CNIC_PRESENT;
Merav Sicron51c1a582012-03-18 10:33:38 +00001426 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001427 bp->msix_table[0].vector,
1428 0, bp->msix_table[offset].vector,
1429 i - 1, bp->msix_table[offset + i - 1].vector);
1430
1431 return 0;
1432}
1433
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001434int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001435{
1436 int rc;
1437
1438 rc = pci_enable_msi(bp->pdev);
1439 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001440 BNX2X_DEV_INFO("MSI is not attainable\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001441 return -1;
1442 }
1443 bp->flags |= USING_MSI_FLAG;
1444
1445 return 0;
1446}
1447
1448static int bnx2x_req_irq(struct bnx2x *bp)
1449{
1450 unsigned long flags;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001451 unsigned int irq;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001452
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001453 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001454 flags = 0;
1455 else
1456 flags = IRQF_SHARED;
1457
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001458 if (bp->flags & USING_MSIX_FLAG)
1459 irq = bp->msix_table[0].vector;
1460 else
1461 irq = bp->pdev->irq;
1462
1463 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001464}
1465
Eric Dumazet1191cb82012-04-27 21:39:21 +00001466static int bnx2x_setup_irqs(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001467{
1468 int rc = 0;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001469 if (bp->flags & USING_MSIX_FLAG &&
1470 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001471 rc = bnx2x_req_msix_irqs(bp);
1472 if (rc)
1473 return rc;
1474 } else {
1475 bnx2x_ack_int(bp);
1476 rc = bnx2x_req_irq(bp);
1477 if (rc) {
1478 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1479 return rc;
1480 }
1481 if (bp->flags & USING_MSI_FLAG) {
1482 bp->dev->irq = bp->pdev->irq;
Dmitry Kravkov30a5de72012-04-03 18:41:26 +00001483 netdev_info(bp->dev, "using MSI IRQ %d\n",
1484 bp->dev->irq);
1485 }
1486 if (bp->flags & USING_MSIX_FLAG) {
1487 bp->dev->irq = bp->msix_table[0].vector;
1488 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1489 bp->dev->irq);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001490 }
1491 }
1492
1493 return 0;
1494}
1495
Eric Dumazet1191cb82012-04-27 21:39:21 +00001496static void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001497{
1498 int i;
1499
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001500 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001501 napi_enable(&bnx2x_fp(bp, i, napi));
1502}
1503
Eric Dumazet1191cb82012-04-27 21:39:21 +00001504static void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001505{
1506 int i;
1507
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001508 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001509 napi_disable(&bnx2x_fp(bp, i, napi));
1510}
1511
1512void bnx2x_netif_start(struct bnx2x *bp)
1513{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001514 if (netif_running(bp->dev)) {
1515 bnx2x_napi_enable(bp);
1516 bnx2x_int_enable(bp);
1517 if (bp->state == BNX2X_STATE_OPEN)
1518 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001519 }
1520}
1521
1522void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1523{
1524 bnx2x_int_disable_sync(bp, disable_hw);
1525 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001526}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001527
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001528u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1529{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001530 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001531
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001532#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001533 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001534 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1535 u16 ether_type = ntohs(hdr->h_proto);
1536
1537 /* Skip VLAN tag if present */
1538 if (ether_type == ETH_P_8021Q) {
1539 struct vlan_ethhdr *vhdr =
1540 (struct vlan_ethhdr *)skb->data;
1541
1542 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1543 }
1544
1545 /* If ethertype is FCoE or FIP - use FCoE ring */
1546 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001547 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001548 }
1549#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001550 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001551 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001552}
1553
Dmitry Kravkov96305232012-04-03 18:41:30 +00001554
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001555void bnx2x_set_num_queues(struct bnx2x *bp)
1556{
Dmitry Kravkov96305232012-04-03 18:41:30 +00001557 /* RSS queues */
1558 bp->num_queues = bnx2x_calc_num_queues(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001559
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001560#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00001561 /* override in STORAGE SD modes */
1562 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001563 bp->num_queues = 1;
1564#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001565 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001566 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001567}
1568
David S. Miller823dcd22011-08-20 10:39:12 -07001569/**
1570 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1571 *
1572 * @bp: Driver handle
1573 *
 1574 * We currently support at most 16 Tx queues for each CoS, thus we will
1575 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1576 * bp->max_cos.
1577 *
1578 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1579 * index after all ETH L2 indices.
1580 *
1581 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1582 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1583 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1584 *
1585 * The proper configuration of skb->queue_mapping is handled by
1586 * bnx2x_select_queue() and __skb_tx_hash().
1587 *
1588 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1589 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1590 */
Eric Dumazet1191cb82012-04-27 21:39:21 +00001591static int bnx2x_set_real_num_queues(struct bnx2x *bp)
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001592{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001593 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001594
Ariel Elior6383c0b2011-07-14 08:31:57 +00001595 tx = MAX_TXQS_PER_COS * bp->max_cos;
1596 rx = BNX2X_NUM_ETH_QUEUES(bp);
1597
1598/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001599#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001600 if (!NO_FCOE(bp)) {
1601 rx += FCOE_PRESENT;
1602 tx += FCOE_PRESENT;
1603 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001604#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001605
1606 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1607 if (rc) {
1608 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1609 return rc;
1610 }
1611 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1612 if (rc) {
1613 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1614 return rc;
1615 }
1616
Merav Sicron51c1a582012-03-18 10:33:38 +00001617 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00001618 tx, rx);
1619
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001620 return rc;
1621}
1622
Eric Dumazet1191cb82012-04-27 21:39:21 +00001623static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001624{
1625 int i;
1626
1627 for_each_queue(bp, i) {
1628 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001629 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001630
1631 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1632 if (IS_FCOE_IDX(i))
1633 /*
 1634			 * Although there are no IP frames expected to arrive on
 1635			 * this ring, we still want to add an
1636 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1637 * overrun attack.
1638 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001639 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001640 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001641 mtu = bp->dev->mtu;
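		/* The Rx buffer must fit the FW alignment at both ends, the
		 * IP header alignment padding, the Ethernet overhead and the
		 * MTU itself.
		 */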
1642 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1643 IP_HEADER_ALIGNMENT_PADDING +
1644 ETH_OVREHEAD +
1645 mtu +
1646 BNX2X_FW_RX_ALIGN_END;
 1647		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001648 }
1649}
1650
Eric Dumazet1191cb82012-04-27 21:39:21 +00001651static int bnx2x_init_rss_pf(struct bnx2x *bp)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001652{
1653 int i;
1654 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1655 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1656
Dmitry Kravkov96305232012-04-03 18:41:30 +00001657	/* Prepare the initial contents of the indirection table if RSS is
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001658 * enabled
1659 */
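	/* ethtool_rxfh_indir_default(i, n) spreads the table entries
	 * round-robin over the n ETH queues (effectively i % n), offset by
	 * the leading client id.
	 */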
Dmitry Kravkov96305232012-04-03 18:41:30 +00001660 for (i = 0; i < sizeof(ind_table); i++)
1661 ind_table[i] =
1662 bp->fp->cl_id +
1663 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001664
1665 /*
1666 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1667	 * per-port, so if explicit configuration is needed, do it only
1668 * for a PMF.
1669 *
1670 * For 57712 and newer on the other hand it's a per-function
1671 * configuration.
1672 */
Dmitry Kravkov96305232012-04-03 18:41:30 +00001673 return bnx2x_config_rss_eth(bp, ind_table,
1674 bp->port.pmf || !CHIP_IS_E1x(bp));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001675}
1676
Dmitry Kravkov96305232012-04-03 18:41:30 +00001677int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1678 u8 *ind_table, bool config_hash)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001679{
Yuval Mintz3b603062012-03-18 10:33:39 +00001680 struct bnx2x_config_rss_params params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001681 int i;
1682
1683 /* Although RSS is meaningless when there is a single HW queue we
1684 * still need it enabled in order to have HW Rx hash generated.
1685 *
1686 * if (!is_eth_multi(bp))
1687 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1688 */
1689
Dmitry Kravkov96305232012-04-03 18:41:30 +00001690 params.rss_obj = rss_obj;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001691
1692 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1693
Dmitry Kravkov96305232012-04-03 18:41:30 +00001694 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001695
Dmitry Kravkov96305232012-04-03 18:41:30 +00001696 /* RSS configuration */
1697 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1698 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1699 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1700 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001701
Dmitry Kravkov96305232012-04-03 18:41:30 +00001702 /* Hash bits */
1703 params.rss_result_mask = MULTI_MASK;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001704
Dmitry Kravkov96305232012-04-03 18:41:30 +00001705 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001706
Dmitry Kravkov96305232012-04-03 18:41:30 +00001707 if (config_hash) {
1708 /* RSS keys */
1709 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1710 params.rss_key[i] = random32();
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001711
Dmitry Kravkov96305232012-04-03 18:41:30 +00001712 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001713 }
1714
1715 return bnx2x_config_rss(bp, &params);
1716}
1717
Eric Dumazet1191cb82012-04-27 21:39:21 +00001718static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001719{
Yuval Mintz3b603062012-03-18 10:33:39 +00001720 struct bnx2x_func_state_params func_params = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001721
1722 /* Prepare parameters for function state transitions */
1723 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1724
1725 func_params.f_obj = &bp->func_obj;
1726 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1727
1728 func_params.params.hw_init.load_phase = load_code;
1729
1730 return bnx2x_func_state_change(bp, &func_params);
1731}
1732
1733/*
 1734 * Cleans the objects that have internal lists without sending
 1735 * ramrods. Should be run when interrupts are disabled.
1736 */
1737static void bnx2x_squeeze_objects(struct bnx2x *bp)
1738{
1739 int rc;
1740 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
Yuval Mintz3b603062012-03-18 10:33:39 +00001741 struct bnx2x_mcast_ramrod_params rparam = {NULL};
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001742 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1743
1744 /***************** Cleanup MACs' object first *************************/
1745
 1746	/* Wait for completion of the requested commands */
1747 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1748 /* Perform a dry cleanup */
1749 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1750
1751 /* Clean ETH primary MAC */
1752 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1753 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1754 &ramrod_flags);
1755 if (rc != 0)
1756 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1757
1758 /* Cleanup UC list */
1759 vlan_mac_flags = 0;
1760 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1761 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1762 &ramrod_flags);
1763 if (rc != 0)
1764 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1765
1766 /***************** Now clean mcast object *****************************/
1767 rparam.mcast_obj = &bp->mcast_obj;
1768 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1769
1770 /* Add a DEL command... */
1771 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1772 if (rc < 0)
Merav Sicron51c1a582012-03-18 10:33:38 +00001773 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1774 rc);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001775
1776 /* ...and wait until all pending commands are cleared */
1777 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1778 while (rc != 0) {
1779 if (rc < 0) {
1780 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1781 rc);
1782 return;
1783 }
1784
1785 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1786 }
1787}
1788
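/* Load-error handling: without BNX2X_STOP_ON_ERROR the driver unwinds
 * through the load_error* labels below; with it set, the error path marks
 * bp->panic and returns immediately so the failing state can be inspected.
 */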
1789#ifndef BNX2X_STOP_ON_ERROR
1790#define LOAD_ERROR_EXIT(bp, label) \
1791 do { \
1792 (bp)->state = BNX2X_STATE_ERROR; \
1793 goto label; \
1794 } while (0)
1795#else
1796#define LOAD_ERROR_EXIT(bp, label) \
1797 do { \
1798 (bp)->state = BNX2X_STATE_ERROR; \
1799 (bp)->panic = 1; \
1800 return -EBUSY; \
1801 } while (0)
1802#endif
1803
Yuval Mintz452427b2012-03-26 20:47:07 +00001804bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1805{
1806 /* build FW version dword */
1807 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1808 (BCM_5710_FW_MINOR_VERSION << 8) +
1809 (BCM_5710_FW_REVISION_VERSION << 16) +
1810 (BCM_5710_FW_ENGINEERING_VERSION << 24);
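	/* i.e. major in bits 7:0, minor in 15:8, revision in 23:16 and
	 * engineering version in bits 31:24 of the dword.
	 */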
1811
1812 /* read loaded FW from chip */
1813 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1814
1815 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1816
1817 if (loaded_fw != my_fw) {
1818 if (is_err)
1819 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1820 loaded_fw, my_fw);
1821 return false;
1822 }
1823
1824 return true;
1825}
1826
Eric Dumazet1191cb82012-04-27 21:39:21 +00001827/**
1828 * bnx2x_bz_fp - zero content of the fastpath structure.
1829 *
1830 * @bp: driver handle
1831 * @index: fastpath index to be zeroed
1832 *
 1833 * Makes sure the contents of bp->fp[index].napi are kept
1834 * intact.
1835 */
1836static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1837{
1838 struct bnx2x_fastpath *fp = &bp->fp[index];
1839 struct napi_struct orig_napi = fp->napi;
1840 /* bzero bnx2x_fastpath contents */
1841 if (bp->stats_init)
1842 memset(fp, 0, sizeof(*fp));
1843 else {
1844 /* Keep Queue statistics */
1845 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1846 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1847
1848 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1849 GFP_KERNEL);
1850 if (tmp_eth_q_stats)
1851 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1852 sizeof(struct bnx2x_eth_q_stats));
1853
1854 tmp_eth_q_stats_old =
1855 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1856 GFP_KERNEL);
1857 if (tmp_eth_q_stats_old)
1858 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1859 sizeof(struct bnx2x_eth_q_stats_old));
1860
1861 memset(fp, 0, sizeof(*fp));
1862
1863 if (tmp_eth_q_stats) {
1864 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1865 sizeof(struct bnx2x_eth_q_stats));
1866 kfree(tmp_eth_q_stats);
1867 }
1868
1869 if (tmp_eth_q_stats_old) {
1870 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1871 sizeof(struct bnx2x_eth_q_stats_old));
1872 kfree(tmp_eth_q_stats_old);
1873 }
1874
1875 }
1876
1877 /* Restore the NAPI object as it has been already initialized */
1878 fp->napi = orig_napi;
1879
1880 fp->bp = bp;
1881 fp->index = index;
1882 if (IS_ETH_FP(fp))
1883 fp->max_cos = bp->max_cos;
1884 else
1885 /* Special queues support only one CoS */
1886 fp->max_cos = 1;
1887
1888 /*
 1889	 * set the tpa flag for each queue. The tpa flag determines the minimal
 1890	 * queue size, so it must be set prior to queue memory allocation
1891 */
1892 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1893 (bp->flags & GRO_ENABLE_FLAG &&
1894 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1895 if (bp->flags & TPA_ENABLE_FLAG)
1896 fp->mode = TPA_MODE_LRO;
1897 else if (bp->flags & GRO_ENABLE_FLAG)
1898 fp->mode = TPA_MODE_GRO;
1899
1900#ifdef BCM_CNIC
1901 /* We don't want TPA on an FCoE L2 ring */
1902 if (IS_FCOE_FP(fp))
1903 fp->disable_tpa = 1;
1904#endif
1905}
1906
1907
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001908/* must be called with rtnl_lock */
1909int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1910{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001911 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001912 u32 load_code;
1913 int i, rc;
1914
1915#ifdef BNX2X_STOP_ON_ERROR
Merav Sicron51c1a582012-03-18 10:33:38 +00001916 if (unlikely(bp->panic)) {
1917 BNX2X_ERR("Can't load NIC when there is panic\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001918 return -EPERM;
Merav Sicron51c1a582012-03-18 10:33:38 +00001919 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001920#endif
1921
1922 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1923
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001924 /* Set the initial link reported state to link down */
1925 bnx2x_acquire_phy_lock(bp);
1926 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1927 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1928 &bp->last_reported_link.link_report_flags);
1929 bnx2x_release_phy_lock(bp);
1930
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001931 /* must be called before memory allocation and HW init */
1932 bnx2x_ilt_set_info(bp);
1933
Ariel Elior6383c0b2011-07-14 08:31:57 +00001934 /*
 1935	 * Zero fastpath structures while preserving invariants such as the napi
 1936	 * struct (allocated only once), the fp index, max_cos and the bp pointer.
 1937	 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001938 */
Merav Sicron51c1a582012-03-18 10:33:38 +00001939	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001940 for_each_queue(bp, i)
1941 bnx2x_bz_fp(bp, i);
1942
Ariel Elior6383c0b2011-07-14 08:31:57 +00001943
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001944 /* Set the receive queues buffer size */
1945 bnx2x_set_rx_buf_size(bp);
1946
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001947 if (bnx2x_alloc_mem(bp))
1948 return -ENOMEM;
1949
1950 /* As long as bnx2x_alloc_mem() may possibly update
1951 * bp->num_queues, bnx2x_set_real_num_queues() should always
1952 * come after it.
1953 */
1954 rc = bnx2x_set_real_num_queues(bp);
1955 if (rc) {
1956 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001957 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001958 }
1959
Ariel Elior6383c0b2011-07-14 08:31:57 +00001960 /* configure multi cos mappings in kernel.
 1961	 * this configuration may be overridden by a multi-class queue discipline
1962 * or by a dcbx negotiation result.
1963 */
1964 bnx2x_setup_tc(bp->dev, bp->max_cos);
1965
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001966 bnx2x_napi_enable(bp);
1967
Ariel Elior889b9af2012-01-26 06:01:51 +00001968 /* set pf load just before approaching the MCP */
1969 bnx2x_set_pf_load(bp);
1970
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001971 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001972 * Returns the type of LOAD command:
 1973	 * if it is the first port to be initialized,
 1974	 * common blocks should be initialized; otherwise not
1975 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001976 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00001977 /* init fw_seq */
1978 bp->fw_seq =
1979 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1980 DRV_MSG_SEQ_NUMBER_MASK);
1981 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1982
1983 /* Get current FW pulse sequence */
1984 bp->fw_drv_pulse_wr_seq =
1985 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1986 DRV_PULSE_SEQ_MASK);
1987 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1988
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001989 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001990 if (!load_code) {
1991 BNX2X_ERR("MCP response failure, aborting\n");
1992 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001993 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001994 }
1995 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
Merav Sicron51c1a582012-03-18 10:33:38 +00001996 BNX2X_ERR("Driver load refused\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001997 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001998 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001999 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00002000 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2001 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002002 /* abort nic load if version mismatch */
Yuval Mintz452427b2012-03-26 20:47:07 +00002003 if (!bnx2x_test_firmware_version(bp, true)) {
Ariel Eliord1e2d962012-01-26 06:01:49 +00002004 rc = -EBUSY;
2005 LOAD_ERROR_EXIT(bp, load_error2);
2006 }
2007 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002008
2009 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002010 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002011
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002012 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2013 path, load_count[path][0], load_count[path][1],
2014 load_count[path][2]);
2015 load_count[path][0]++;
2016 load_count[path][1 + port]++;
2017 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2018 path, load_count[path][0], load_count[path][1],
2019 load_count[path][2]);
2020 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002021 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002022 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002023 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2024 else
2025 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2026 }
2027
2028 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002029 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002030 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002031 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002032 /*
2033 * We need the barrier to ensure the ordering between the
2034 * writing to bp->port.pmf here and reading it from the
2035 * bnx2x_periodic_task().
2036 */
2037 smp_mb();
Yaniv Rosner3deb8162011-06-14 01:34:33 +00002038 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002039 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002040
Merav Sicron51c1a582012-03-18 10:33:38 +00002041 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002042
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002043 /* Init Function state controlling object */
2044 bnx2x__init_func_obj(bp);
2045
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002046 /* Initialize HW */
2047 rc = bnx2x_init_hw(bp, load_code);
2048 if (rc) {
2049 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002050 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002051 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002052 }
2053
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002054 /* Connect to IRQs */
2055 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002056 if (rc) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002057 BNX2X_ERR("IRQs setup failed\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002058 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002059 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002060 }
2061
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002062 /* Setup NIC internals and enable interrupts */
2063 bnx2x_nic_init(bp, load_code);
2064
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002065 /* Init per-function objects */
2066 bnx2x_init_bp_objs(bp);
2067
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002068 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2069 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002070 (bp->common.shmem2_base)) {
2071 if (SHMEM2_HAS(bp, dcc_support))
2072 SHMEM2_WR(bp, dcc_support,
2073 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2074 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
Barak Witkowskia3348722012-04-23 03:04:46 +00002075 if (SHMEM2_HAS(bp, afex_driver_support))
2076 SHMEM2_WR(bp, afex_driver_support,
2077 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002078 }
2079
Barak Witkowskia3348722012-04-23 03:04:46 +00002080 /* Set AFEX default VLAN tag to an invalid value */
2081 bp->afex_def_vlan_tag = -1;
2082
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002083 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2084 rc = bnx2x_func_start(bp);
2085 if (rc) {
2086 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00002087 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002088 LOAD_ERROR_EXIT(bp, load_error3);
2089 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002090
2091 /* Send LOAD_DONE command to MCP */
2092 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002093 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002094 if (!load_code) {
2095 BNX2X_ERR("MCP response failure, aborting\n");
2096 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002097 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002098 }
2099 }
2100
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002101 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002102 if (rc) {
2103 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002104 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002105 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002106
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002107#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002108 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002109 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002110#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002111
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002112 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002113 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Merav Sicron51c1a582012-03-18 10:33:38 +00002114 if (rc) {
2115 BNX2X_ERR("Queue setup failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002116 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002117 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002118 }
2119
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002120 rc = bnx2x_init_rss_pf(bp);
Merav Sicron51c1a582012-03-18 10:33:38 +00002121 if (rc) {
2122 BNX2X_ERR("PF RSS init failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002123 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002124 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002125
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002126 /* Now when Clients are configured we are ready to work */
2127 bp->state = BNX2X_STATE_OPEN;
2128
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002129 /* Configure a ucast MAC */
2130 rc = bnx2x_set_eth_mac(bp, true);
Merav Sicron51c1a582012-03-18 10:33:38 +00002131 if (rc) {
2132 BNX2X_ERR("Setting Ethernet MAC failed\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002133 LOAD_ERROR_EXIT(bp, load_error4);
Merav Sicron51c1a582012-03-18 10:33:38 +00002134 }
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08002135
Dmitry Kravkove3835b92011-03-06 10:50:44 +00002136 if (bp->pending_max) {
2137 bnx2x_update_max_mf_config(bp, bp->pending_max);
2138 bp->pending_max = 0;
2139 }
2140
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002141 if (bp->port.pmf)
2142 bnx2x_initial_phy_init(bp, load_mode);
2143
2144 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002145
2146 /* Initialize Rx filter. */
2147 netif_addr_lock_bh(bp->dev);
2148 bnx2x_set_rx_mode(bp->dev);
2149 netif_addr_unlock_bh(bp->dev);
2150
2151 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002152 switch (load_mode) {
2153 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002154		/* Tx queues should only be re-enabled */
2155 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002156 break;
2157
2158 case LOAD_OPEN:
2159 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002160 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002161 break;
2162
2163 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002164 bp->state = BNX2X_STATE_DIAG;
2165 break;
2166
2167 default:
2168 break;
2169 }
2170
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002171 if (bp->port.pmf)
Yuval Mintze695a2d2012-03-12 11:22:06 +00002172 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
Dmitry Kravkov00253a82011-11-13 04:34:25 +00002173 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002174 bnx2x__link_status_update(bp);
2175
2176 /* start the timer */
2177 mod_timer(&bp->timer, jiffies + bp->current_interval);
2178
2179#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00002180 /* re-read iscsi info */
2181 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002182 bnx2x_setup_cnic_irq_info(bp);
2183 if (bp->state == BNX2X_STATE_OPEN)
2184 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2185#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002186
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002187	/* mark the driver as loaded in shmem2 */
2188 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2189 u32 val;
2190 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2191 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2192 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2193 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2194 }
2195
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002196 /* Wait for all pending SP commands to complete */
2197 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2198 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2199 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2200 return -EBUSY;
2201 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00002202
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002203 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002204 return 0;
2205
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002206#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002207load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002208#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002209 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002210 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002211#endif
2212load_error3:
2213 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002214
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002215 /* Clean queueable objects */
2216 bnx2x_squeeze_objects(bp);
2217
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002218 /* Free SKBs, SGEs, TPA pool and driver internals */
2219 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002220 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002221 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002222
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002223 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002224 bnx2x_free_irq(bp);
2225load_error2:
2226 if (!BP_NOMCP(bp)) {
2227 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2228 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2229 }
2230
2231 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002232load_error1:
2233 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00002234 /* clear pf_load status, as it was already set */
2235 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002236load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002237 bnx2x_free_mem(bp);
2238
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002239 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002240#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002241}
2242
2243/* must be called with rtnl_lock */
2244int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2245{
2246 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002247 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002248
Yuval Mintz9ce392d2012-03-12 08:53:11 +00002249	/* mark the driver as unloaded in shmem2 */
2250 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2251 u32 val;
2252 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2253 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2254 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2255 }
2256
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002257 if ((bp->state == BNX2X_STATE_CLOSED) ||
2258 (bp->state == BNX2X_STATE_ERROR)) {
2259 /* We can get here if the driver has been unloaded
2260 * during parity error recovery and is either waiting for a
2261 * leader to complete or for other functions to unload and
2262 * then ifdown has been issued. In this case we want to
 2263		 * unload and let other functions complete a recovery
2264 * process.
2265 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002266 bp->recovery_state = BNX2X_RECOVERY_DONE;
2267 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002268 bnx2x_release_leader_lock(bp);
2269 smp_mb();
2270
Merav Sicron51c1a582012-03-18 10:33:38 +00002271 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2272 BNX2X_ERR("Can't unload in closed or error state\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002273 return -EINVAL;
2274 }
2275
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002276 /*
 2277	 * It's important to set bp->state to a value different from
2278 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2279 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2280 */
2281 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2282 smp_mb();
2283
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002284 /* Stop Tx */
2285 bnx2x_tx_disable(bp);
2286
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002287#ifdef BCM_CNIC
2288 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2289#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002290
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002291 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002292
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002293 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002294
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002295 /* Set ALWAYS_ALIVE bit in shmem */
2296 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2297
2298 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002299
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002300 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002301 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002302
2303 /* Cleanup the chip if needed */
2304 if (unload_mode != UNLOAD_RECOVERY)
2305 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002306 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002307 /* Send the UNLOAD_REQUEST to the MCP */
2308 bnx2x_send_unload_req(bp, unload_mode);
2309
2310 /*
 2311		 * Prevent transactions to the host from the functions on the
 2312		 * engine that doesn't reset global blocks in case of global
 2313		 * attention once global blocks are reset and gates are opened
 2314		 * (the engine whose leader will perform the recovery
2315 * last).
2316 */
2317 if (!CHIP_IS_E1x(bp))
2318 bnx2x_pf_disable(bp);
2319
2320 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002321 bnx2x_netif_stop(bp, 1);
2322
2323 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002324 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002325
2326 /* Report UNLOAD_DONE to MCP */
2327 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002328 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002329
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002330 /*
 2331	 * At this stage no more interrupts will arrive, so we may safely clean
2332 * the queueable objects here in case they failed to get cleaned so far.
2333 */
2334 bnx2x_squeeze_objects(bp);
2335
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002336 /* There should be no more pending SP commands at this stage */
2337 bp->sp_state = 0;
2338
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002339 bp->port.pmf = 0;
2340
2341 /* Free SKBs, SGEs, TPA pool and driver internals */
2342 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002343 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002344 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002345
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002346 bnx2x_free_mem(bp);
2347
2348 bp->state = BNX2X_STATE_CLOSED;
2349
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002350 /* Check if there are pending parity attentions. If there are - set
2351 * RECOVERY_IN_PROGRESS.
2352 */
2353 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2354 bnx2x_set_reset_in_progress(bp);
2355
2356 /* Set RESET_IS_GLOBAL if needed */
2357 if (global)
2358 bnx2x_set_reset_global(bp);
2359 }
2360
2361
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002362	/* The last driver must disable the "close the gate" functionality if
 2363	 * there is no parity attention or "process kill" pending.
2364 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002365 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002366 bnx2x_disable_close_the_gate(bp);
2367
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002368 return 0;
2369}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002370
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002371int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2372{
2373 u16 pmcsr;
2374
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002375 /* If there is no power capability, silently succeed */
2376 if (!bp->pm_cap) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002377 BNX2X_DEV_INFO("No power capability. Breaking.\n");
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002378 return 0;
2379 }
2380
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002381 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2382
2383 switch (state) {
2384 case PCI_D0:
2385 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2386 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2387 PCI_PM_CTRL_PME_STATUS));
2388
2389 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2390 /* delay required during transition out of D3hot */
2391 msleep(20);
2392 break;
2393
2394 case PCI_D3hot:
 2395		/* If there are other clients above, don't
 2396		 * shut down the power */
2397 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2398 return 0;
2399 /* Don't shut down the power for emulation and FPGA */
2400 if (CHIP_REV_IS_SLOW(bp))
2401 return 0;
2402
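		/* Power-state field value 3 selects D3hot in PMCSR */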
2403 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2404 pmcsr |= 3;
2405
2406 if (bp->wol)
2407 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2408
2409 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2410 pmcsr);
2411
2412 /* No more memory access after this point until
2413 * device is brought back to D0.
2414 */
2415 break;
2416
2417 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00002418 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002419 return -EINVAL;
2420 }
2421 return 0;
2422}
2423
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002424/*
2425 * net_device service functions
2426 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002427int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002428{
2429 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002430 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002431 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2432 napi);
2433 struct bnx2x *bp = fp->bp;
2434
2435 while (1) {
2436#ifdef BNX2X_STOP_ON_ERROR
2437 if (unlikely(bp->panic)) {
2438 napi_complete(napi);
2439 return 0;
2440 }
2441#endif
2442
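		/* Service Tx completions on every CoS ring of this fastpath
		 * before polling for Rx work.
		 */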
Ariel Elior6383c0b2011-07-14 08:31:57 +00002443 for_each_cos_in_tx_queue(fp, cos)
2444 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2445 bnx2x_tx_int(bp, &fp->txdata[cos]);
2446
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002447
2448 if (bnx2x_has_rx_work(fp)) {
2449 work_done += bnx2x_rx_int(fp, budget - work_done);
2450
2451 /* must not complete if we consumed full budget */
2452 if (work_done >= budget)
2453 break;
2454 }
2455
2456 /* Fall out from the NAPI loop if needed */
2457 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002458#ifdef BCM_CNIC
2459 /* No need to update SB for FCoE L2 ring as long as
2460 * it's connected to the default SB and the SB
2461 * has been updated when NAPI was scheduled.
2462 */
2463 if (IS_FCOE_FP(fp)) {
2464 napi_complete(napi);
2465 break;
2466 }
2467#endif
2468
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002469 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002470 /* bnx2x_has_rx_work() reads the status block,
2471 * thus we need to ensure that status block indices
2472 * have been actually read (bnx2x_update_fpsb_idx)
2473 * prior to this check (bnx2x_has_rx_work) so that
2474 * we won't write the "newer" value of the status block
2475 * to IGU (if there was a DMA right after
2476 * bnx2x_has_rx_work and if there is no rmb, the memory
2477 * reading (bnx2x_update_fpsb_idx) may be postponed
2478 * to right before bnx2x_ack_sb). In this case there
2479 * will never be another interrupt until there is
2480 * another update of the status block, while there
2481 * is still unhandled work.
2482 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002483 rmb();
2484
2485 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2486 napi_complete(napi);
2487 /* Re-enable interrupts */
Merav Sicron51c1a582012-03-18 10:33:38 +00002488 DP(NETIF_MSG_RX_STATUS,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002489 "Update index to %d\n", fp->fp_hc_idx);
2490 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2491 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002492 IGU_INT_ENABLE, 1);
2493 break;
2494 }
2495 }
2496 }
2497
2498 return work_done;
2499}
2500
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002501/* we split the first BD into headers and data BDs
2502 * to ease the pain of our fellow microcode engineers
2503 * we use one mapping for both BDs
2504 * So far this has only been observed to happen
2505 * in Other Operating Systems(TM)
2506 */
2507static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002508 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002509 struct sw_tx_bd *tx_buf,
2510 struct eth_tx_start_bd **tx_bd, u16 hlen,
2511 u16 bd_prod, int nbd)
2512{
2513 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2514 struct eth_tx_bd *d_tx_bd;
2515 dma_addr_t mapping;
2516 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2517
2518 /* first fix first BD */
2519 h_tx_bd->nbd = cpu_to_le16(nbd);
2520 h_tx_bd->nbytes = cpu_to_le16(hlen);
2521
Merav Sicron51c1a582012-03-18 10:33:38 +00002522 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2523 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002524
2525 /* now get a new data BD
2526 * (after the pbd) and fill it */
2527 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002528 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002529
2530 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2531 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2532
2533 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2534 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2535 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2536
2537 /* this marks the BD as one that has no individual mapping */
2538 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2539
2540 DP(NETIF_MSG_TX_QUEUED,
2541 "TSO split data size is %d (%x:%x)\n",
2542 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2543
2544 /* update tx_bd */
2545 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2546
2547 return bd_prod;
2548}
2549
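/* Re-base a partial checksum by 'fix' bytes: for fix > 0 the checksum of the
 * 'fix' bytes preceding t_header is folded out of the sum, for fix < 0 the
 * checksum of the bytes following it is folded in; the result is folded,
 * complemented and returned byte-swapped.
 */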
2550static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2551{
2552 if (fix > 0)
2553 csum = (u16) ~csum_fold(csum_sub(csum,
2554 csum_partial(t_header - fix, fix, 0)));
2555
2556 else if (fix < 0)
2557 csum = (u16) ~csum_fold(csum_add(csum,
2558 csum_partial(t_header, -fix, 0)));
2559
2560 return swab16(csum);
2561}
2562
2563static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2564{
2565 u32 rc;
2566
2567 if (skb->ip_summed != CHECKSUM_PARTIAL)
2568 rc = XMIT_PLAIN;
2569
2570 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002571 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002572 rc = XMIT_CSUM_V6;
2573 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2574 rc |= XMIT_CSUM_TCP;
2575
2576 } else {
2577 rc = XMIT_CSUM_V4;
2578 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2579 rc |= XMIT_CSUM_TCP;
2580 }
2581 }
2582
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002583 if (skb_is_gso_v6(skb))
2584 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2585 else if (skb_is_gso(skb))
2586 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002587
2588 return rc;
2589}
2590
2591#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 2592/* check if packet requires linearization (packet is too fragmented)
 2593 * no need to check fragmentation if page size > 8K (there will be no
 2594 * violation of FW restrictions) */
2595static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2596 u32 xmit_type)
2597{
2598 int to_copy = 0;
2599 int hlen = 0;
2600 int first_bd_sz = 0;
2601
2602 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2603 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2604
2605 if (xmit_type & XMIT_GSO) {
2606 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2607 /* Check if LSO packet needs to be copied:
2608 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2609 int wnd_size = MAX_FETCH_BD - 3;
2610 /* Number of windows to check */
2611 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2612 int wnd_idx = 0;
2613 int frag_idx = 0;
2614 u32 wnd_sum = 0;
2615
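			/* Slide a window of wnd_size consecutive data BDs over
			 * the frag list; if any window carries less than one
			 * MSS the packet would violate the FW restriction and
			 * must be linearized.
			 */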
2616 /* Headers length */
2617 hlen = (int)(skb_transport_header(skb) - skb->data) +
2618 tcp_hdrlen(skb);
2619
 2620			/* Amount of data (w/o headers) on the linear part of the SKB */
2621 first_bd_sz = skb_headlen(skb) - hlen;
2622
2623 wnd_sum = first_bd_sz;
2624
2625 /* Calculate the first sum - it's special */
2626 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2627 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002628 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002629
2630 /* If there was data on linear skb data - check it */
2631 if (first_bd_sz > 0) {
2632 if (unlikely(wnd_sum < lso_mss)) {
2633 to_copy = 1;
2634 goto exit_lbl;
2635 }
2636
2637 wnd_sum -= first_bd_sz;
2638 }
2639
2640 /* Others are easier: run through the frag list and
2641 check all windows */
2642 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2643 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002644 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002645
2646 if (unlikely(wnd_sum < lso_mss)) {
2647 to_copy = 1;
2648 break;
2649 }
2650 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002651 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002652 }
2653 } else {
 2654			/* in the non-LSO case a too fragmented packet should
 2655			 * always be linearized */
2656 to_copy = 1;
2657 }
2658 }
2659
2660exit_lbl:
2661 if (unlikely(to_copy))
2662 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00002663 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002664 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2665 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2666
2667 return to_copy;
2668}
2669#endif
2670
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002671static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2672 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002673{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002674 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2675 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2676 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002677 if ((xmit_type & XMIT_GSO_V6) &&
2678 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002679 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002680}
2681
2682/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002683 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002684 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002685 * @skb: packet skb
2686 * @pbd: parse BD
2687 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002688 */
2689static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2690 struct eth_tx_parse_bd_e1x *pbd,
2691 u32 xmit_type)
2692{
2693 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2694 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2695 pbd->tcp_flags = pbd_tcp_flags(skb);
2696
2697 if (xmit_type & XMIT_GSO_V4) {
2698 pbd->ip_id = swab16(ip_hdr(skb)->id);
2699 pbd->tcp_pseudo_csum =
2700 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2701 ip_hdr(skb)->daddr,
2702 0, IPPROTO_TCP, 0));
2703
2704 } else
2705 pbd->tcp_pseudo_csum =
2706 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2707 &ipv6_hdr(skb)->daddr,
2708 0, IPPROTO_TCP, 0));
2709
2710 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2711}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002712
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002713/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002714 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002715 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002716 * @bp: driver handle
2717 * @skb: packet skb
2718 * @parsing_data: data to be updated
2719 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002720 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002721 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002722 */
2723static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002724 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002725{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002726 *parsing_data |=
2727 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2728 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2729 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
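	/* Offsets in the parsing BD are in 16-bit words (hence the >> 1
	 * above); the TCP header length below is in dwords (hence the / 4).
	 */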
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002730
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002731 if (xmit_type & XMIT_CSUM_TCP) {
2732 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2733 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2734 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002735
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002736 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2737 } else
2738 /* We support checksum offload for TCP and UDP only.
2739 * No need to pass the UDP header length - it's a constant.
2740 */
2741 return skb_transport_header(skb) +
2742 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002743}
2744
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002745static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2746 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2747{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002748 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2749
2750 if (xmit_type & XMIT_CSUM_V4)
2751 tx_start_bd->bd_flags.as_bitfield |=
2752 ETH_TX_BD_FLAGS_IP_CSUM;
2753 else
2754 tx_start_bd->bd_flags.as_bitfield |=
2755 ETH_TX_BD_FLAGS_IPV6;
2756
2757 if (!(xmit_type & XMIT_CSUM_TCP))
2758 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002759}
2760
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002761/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002762 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002763 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002764 * @bp: driver handle
2765 * @skb: packet skb
2766 * @pbd: parse BD to be updated
2767 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002768 */
2769static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2770 struct eth_tx_parse_bd_e1x *pbd,
2771 u32 xmit_type)
2772{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002773 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002774
2775 /* for now NS flag is not used in Linux */
2776 pbd->global_data =
2777 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2778 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2779
2780 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002781 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002782
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002783 hlen += pbd->ip_hlen_w;
2784
2785 /* We support checksum offload for TCP and UDP only */
2786 if (xmit_type & XMIT_CSUM_TCP)
2787 hlen += tcp_hdrlen(skb) / 2;
2788 else
2789 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002790
2791 pbd->total_hlen_w = cpu_to_le16(hlen);
2792 hlen = hlen*2;
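	/* pbd->total_hlen_w is kept in 16-bit words; the length returned to
	 * the caller is converted back to bytes.
	 */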
2793
2794 if (xmit_type & XMIT_CSUM_TCP) {
2795 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2796
2797 } else {
2798 s8 fix = SKB_CS_OFF(skb); /* signed! */
2799
2800 DP(NETIF_MSG_TX_QUEUED,
2801 "hlen %d fix %d csum before fix %x\n",
2802 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2803
2804 /* HW bug: fixup the CSUM */
2805 pbd->tcp_pseudo_csum =
2806 bnx2x_csum_fix(skb_transport_header(skb),
2807 SKB_CS(skb), fix);
2808
2809 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2810 pbd->tcp_pseudo_csum);
2811 }
2812
2813 return hlen;
2814}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002815
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002816/* called with netif_tx_lock
2817 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2818 * netif_wake_queue()
2819 */
2820netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2821{
2822 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002823
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002824 struct bnx2x_fastpath *fp;
2825 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002826 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002827 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002828 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002829 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002830 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002831 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002832 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002833 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002834 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002835 dma_addr_t mapping;
2836 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2837 int i;
2838 u8 hlen = 0;
2839 __le16 pkt_size = 0;
2840 struct ethhdr *eth;
2841 u8 mac_type = UNICAST_ADDRESS;
2842
2843#ifdef BNX2X_STOP_ON_ERROR
2844 if (unlikely(bp->panic))
2845 return NETDEV_TX_BUSY;
2846#endif
2847
Ariel Elior6383c0b2011-07-14 08:31:57 +00002848 txq_index = skb_get_queue_mapping(skb);
2849 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002850
Ariel Elior6383c0b2011-07-14 08:31:57 +00002851 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2852
2853 /* decode the fastpath index and the cos index from the txq */
2854 fp_index = TXQ_TO_FP(txq_index);
2855 txdata_index = TXQ_TO_COS(txq_index);
2856
2857#ifdef BCM_CNIC
2858 /*
2859 * Override the above for the FCoE queue:
2860 * - FCoE fp entry is right after the ETH entries.
2861 * - FCoE L2 queue uses bp->txdata[0] only.
2862 */
2863 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2864 bnx2x_fcoe_tx(bp, txq_index)))) {
2865 fp_index = FCOE_IDX;
2866 txdata_index = 0;
2867 }
2868#endif
2869
2870 /* enable this debug print to view the transmission queue being used
Merav Sicron51c1a582012-03-18 10:33:38 +00002871 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002872 txq_index, fp_index, txdata_index); */
2873
2874 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002875 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002876 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002877
Ariel Elior6383c0b2011-07-14 08:31:57 +00002878 /* enable this debug print to view the transmission details
Merav Sicron51c1a582012-03-18 10:33:38 +00002879 DP(NETIF_MSG_TX_QUEUED,
2880 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002881 txdata->cid, fp_index, txdata_index, txdata, fp); */
2882
2883 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2884 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002885 fp->eth_q_stats.driver_xoff++;
2886 netif_tx_stop_queue(txq);
2887 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2888 return NETDEV_TX_BUSY;
2889 }
2890
Merav Sicron51c1a582012-03-18 10:33:38 +00002891 DP(NETIF_MSG_TX_QUEUED,
2892 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002893 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002894 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2895
2896 eth = (struct ethhdr *)skb->data;
2897
2898 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2899 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2900 if (is_broadcast_ether_addr(eth->h_dest))
2901 mac_type = BROADCAST_ADDRESS;
2902 else
2903 mac_type = MULTICAST_ADDRESS;
2904 }
2905
2906#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2907 /* First, check if we need to linearize the skb (due to FW
2908 restrictions). No need to check fragmentation if page size > 8K
2909 (there will be no violation of FW restrictions) */
2910 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2911 /* Statistics of linearization */
2912 bp->lin_cnt++;
2913 if (skb_linearize(skb) != 0) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002914 DP(NETIF_MSG_TX_QUEUED,
2915 "SKB linearization failed - silently dropping this SKB\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002916 dev_kfree_skb_any(skb);
2917 return NETDEV_TX_OK;
2918 }
2919 }
2920#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002921 /* Map skb linear data for DMA */
2922 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2923 skb_headlen(skb), DMA_TO_DEVICE);
2924 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Merav Sicron51c1a582012-03-18 10:33:38 +00002925 DP(NETIF_MSG_TX_QUEUED,
2926 "SKB mapping failed - silently dropping this SKB\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002927 dev_kfree_skb_any(skb);
2928 return NETDEV_TX_OK;
2929 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002930 /*
2931 Please read carefully. First we use one BD which we mark as start,
2932 then we have a parsing info BD (used for TSO or xsum),
2933 and only then we have the rest of the TSO BDs.
2934 (don't forget to mark the last one as last,
2935 and to unmap only AFTER you write to the BD ...)
2936 And above all, all pbd sizes are in words - NOT DWORDS!
2937 */
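	/* An illustrative BD chain for one packet:
	 *   start BD -> parsing BD (e1x or e2) -> data BDs (one per frag,
	 *   plus a possible split of the linear part for LSO), with the
	 *   last BD marked as last.
	 */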
2938
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002939 /* get current pkt produced now - advance it just before sending packet
2940 * since mapping of pages may fail and cause packet to be dropped
2941 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002942 pkt_prod = txdata->tx_pkt_prod;
2943 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002944
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002945 /* get a tx_buf and first BD
2946 * tx_start_bd may be changed during SPLIT,
2947 * but first_bd will always stay first
2948 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002949 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2950 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002951 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002952
2953 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002954 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2955 mac_type);
2956
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002957 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002958 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002959
2960 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002961 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002962 tx_buf->skb = skb;
2963 tx_buf->flags = 0;
2964
2965 DP(NETIF_MSG_TX_QUEUED,
2966 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002967 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002968
Jesse Grosseab6d182010-10-20 13:56:03 +00002969 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002970 tx_start_bd->vlan_or_ethertype =
2971 cpu_to_le16(vlan_tx_tag_get(skb));
2972 tx_start_bd->bd_flags.as_bitfield |=
2973 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002974 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002975 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002976
2977 /* turn on parsing and get a BD */
2978 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002979
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002980 if (xmit_type & XMIT_CSUM)
2981 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002982
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002983 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002984 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002985 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2986 /* Set PBD in checksum offload case */
2987 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002988 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2989 &pbd_e2_parsing_data,
2990 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002991 if (IS_MF_SI(bp)) {
2992 /*
2993 * fill in the MAC addresses in the PBD - for local
2994 * switching
2995 */
2996 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2997 &pbd_e2->src_mac_addr_mid,
2998 &pbd_e2->src_mac_addr_lo,
2999 eth->h_source);
3000 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3001 &pbd_e2->dst_mac_addr_mid,
3002 &pbd_e2->dst_mac_addr_lo,
3003 eth->h_dest);
3004 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003005 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00003006 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003007 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3008 /* Set PBD in checksum offload case */
3009 if (xmit_type & XMIT_CSUM)
3010 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003011
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003012 }
3013
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003014 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003015 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3016 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003017 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003018 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3019 pkt_size = tx_start_bd->nbytes;
3020
Merav Sicron51c1a582012-03-18 10:33:38 +00003021 DP(NETIF_MSG_TX_QUEUED,
3022 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003023 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3024 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003025 tx_start_bd->bd_flags.as_bitfield,
3026 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003027
3028 if (xmit_type & XMIT_GSO) {
3029
3030 DP(NETIF_MSG_TX_QUEUED,
3031 "TSO packet len %d hlen %d total len %d tso size %d\n",
3032 skb->len, hlen, skb_headlen(skb),
3033 skb_shinfo(skb)->gso_size);
3034
3035 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3036
3037 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00003038 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3039 &tx_start_bd, hlen,
3040 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003041 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003042 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3043 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003044 else
3045 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003046 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00003047
3048 /* Set the PBD's parsing_data field if not zero
3049 * (for the chips newer than 57711).
3050 */
3051 if (pbd_e2_parsing_data)
3052 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3053
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003054 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3055
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003056 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003057 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3058 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3059
Eric Dumazet9e903e02011-10-18 21:00:24 +00003060 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3061 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003062 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00003063 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003064
Merav Sicron51c1a582012-03-18 10:33:38 +00003065 DP(NETIF_MSG_TX_QUEUED,
3066 "Unable to map page - dropping packet...\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003067
3068 /* we need unmap all buffers already mapped
3069 * for this SKB;
3070 * first_bd->nbd need to be properly updated
3071 * before call to bnx2x_free_tx_pkt
3072 */
3073 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003074 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00003075 TX_BD(txdata->tx_pkt_prod),
3076 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003077 return NETDEV_TX_OK;
3078 }
3079
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003080 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00003081 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003082 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00003083 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003084
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003085 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3086 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00003087 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3088 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003089 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003090
3091 DP(NETIF_MSG_TX_QUEUED,
3092 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3093 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3094 le16_to_cpu(tx_data_bd->nbytes));
3095 }
3096
3097 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3098
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003099 /* update with actual num BDs */
3100 first_bd->nbd = cpu_to_le16(nbd);
3101
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003102 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3103
3104 /* now send a tx doorbell, counting the next BD
3105 * if the packet contains or ends with it
3106 */
3107 if (TX_BD_POFF(bd_prod) < nbd)
3108 nbd++;
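	/* (presumably this accounts for the next-page pointer BD when the
	 * chain wraps past a BD-page boundary)
	 */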
3109
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003110 /* total_pkt_bytes should be set on the first data BD if
3111 * it's not an LSO packet and there is more than one
3112 * data BD. In this case pkt_size is limited by an MTU value.
3113 * However we prefer to set it for an LSO packet (while we don't
3114 * have to) in order to save some CPU cycles in the non-LSO
3115 * case, where we care much more about them.
3116 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003117 if (total_pkt_bd != NULL)
3118 total_pkt_bd->total_pkt_bytes = pkt_size;
3119
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003120 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003121 DP(NETIF_MSG_TX_QUEUED,
Merav Sicron51c1a582012-03-18 10:33:38 +00003122 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003123 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3124 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3125 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3126 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003127 if (pbd_e2)
3128 DP(NETIF_MSG_TX_QUEUED,
3129 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3130 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3131 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3132 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3133 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003134 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3135
Tom Herbert2df1a702011-11-28 16:33:37 +00003136 netdev_tx_sent_queue(txq, skb->len);
3137
Willem de Bruijn8373c572012-04-27 09:04:06 +00003138 skb_tx_timestamp(skb);
3139
Ariel Elior6383c0b2011-07-14 08:31:57 +00003140 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003141 /*
3142 * Make sure that the BD data is updated before updating the producer
3143 * since FW might read the BD right after the producer is updated.
3144 * This is only applicable for weak-ordered memory model archs such
3145 * as IA-64. The following barrier is also mandatory since FW will
3146 * assume packets must have BDs.
3147 */
3148 wmb();
3149
Ariel Elior6383c0b2011-07-14 08:31:57 +00003150 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003151 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003152
Ariel Elior6383c0b2011-07-14 08:31:57 +00003153 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003154
3155 mmiowb();
3156
Ariel Elior6383c0b2011-07-14 08:31:57 +00003157 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003158
Ariel Elior6383c0b2011-07-14 08:31:57 +00003159 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003160 netif_tx_stop_queue(txq);
3161
3162 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3163 * ordering of set_bit() in netif_tx_stop_queue() and read of
3164 * fp->bd_tx_cons */
3165 smp_mb();
3166
3167 fp->eth_q_stats.driver_xoff++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003168 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003169 netif_tx_wake_queue(txq);
3170 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00003171 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003172
3173 return NETDEV_TX_OK;
3174}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003175
Ariel Elior6383c0b2011-07-14 08:31:57 +00003176/**
3177 * bnx2x_setup_tc - routine to configure net_device for multi tc
3178 *
3179 * @netdev: net device to configure
3180 * @tc: number of traffic classes to enable
3181 *
3182 * callback connected to the ndo_setup_tc function pointer
3183 */
3184int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3185{
3186 int cos, prio, count, offset;
3187 struct bnx2x *bp = netdev_priv(dev);
3188
3189 /* setup tc must be called under rtnl lock */
3190 ASSERT_RTNL();
3191
3192 /* no traffic classes requested. aborting */
3193 if (!num_tc) {
3194 netdev_reset_tc(dev);
3195 return 0;
3196 }
3197
3198 /* requested to support too many traffic classes */
3199 if (num_tc > bp->max_cos) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003200 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3201 num_tc, bp->max_cos);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003202 return -EINVAL;
3203 }
3204
3205 /* declare amount of supported traffic classes */
3206 if (netdev_set_num_tc(dev, num_tc)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003207 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003208 return -EINVAL;
3209 }
3210
3211 /* configure priority to traffic class mapping */
3212 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3213 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Merav Sicron51c1a582012-03-18 10:33:38 +00003214 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3215 "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003216 prio, bp->prio_to_cos[prio]);
3217 }
3218
3219
3220 /* Use this configuration to differentiate tc0 from other COSes.
3221 This can be used for ETS or PFC, and saves the effort of setting
3222 up a multi-class queue disc or negotiating DCBX with a switch
3223 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00003224 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003225 for (prio = 1; prio < 16; prio++) {
3226 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00003227 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003228 } */
3229
3230 /* configure traffic class to transmission queue mapping */
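	/* e.g. with N ETH queues, traffic class 'cos' is mapped onto the
	 * txq range [cos * MAX_TXQS_PER_COS, cos * MAX_TXQS_PER_COS + N)
	 */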
3231 for (cos = 0; cos < bp->max_cos; cos++) {
3232 count = BNX2X_NUM_ETH_QUEUES(bp);
3233 offset = cos * MAX_TXQS_PER_COS;
3234 netdev_set_tc_queue(dev, cos, count, offset);
Merav Sicron51c1a582012-03-18 10:33:38 +00003235 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3236 "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003237 cos, offset, count);
3238 }
3239
3240 return 0;
3241}
3242
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003243/* called with rtnl_lock */
3244int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3245{
3246 struct sockaddr *addr = p;
3247 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003248 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003249
Merav Sicron51c1a582012-03-18 10:33:38 +00003250 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3251 BNX2X_ERR("Requested MAC address is not valid\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003252 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003253 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003254
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003255#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003256 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3257 !is_zero_ether_addr(addr->sa_data)) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003258 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003259 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003260 }
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003261#endif
3262
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003263 if (netif_running(dev)) {
3264 rc = bnx2x_set_eth_mac(bp, false);
3265 if (rc)
3266 return rc;
3267 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003268
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003269 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003270 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3271
3272 if (netif_running(dev))
3273 rc = bnx2x_set_eth_mac(bp, true);
3274
3275 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003276}
3277
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003278static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3279{
3280 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3281 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003282 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003283
3284 /* Common */
3285#ifdef BCM_CNIC
3286 if (IS_FCOE_IDX(fp_index)) {
3287 memset(sb, 0, sizeof(union host_hc_status_block));
3288 fp->status_blk_mapping = 0;
3289
3290 } else {
3291#endif
3292 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003293 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003294 BNX2X_PCI_FREE(sb->e2_sb,
3295 bnx2x_fp(bp, fp_index,
3296 status_blk_mapping),
3297 sizeof(struct host_hc_status_block_e2));
3298 else
3299 BNX2X_PCI_FREE(sb->e1x_sb,
3300 bnx2x_fp(bp, fp_index,
3301 status_blk_mapping),
3302 sizeof(struct host_hc_status_block_e1x));
3303#ifdef BCM_CNIC
3304 }
3305#endif
3306 /* Rx */
3307 if (!skip_rx_queue(bp, fp_index)) {
3308 bnx2x_free_rx_bds(fp);
3309
3310 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3311 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3312 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3313 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3314 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3315
3316 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3317 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3318 sizeof(struct eth_fast_path_rx_cqe) *
3319 NUM_RCQ_BD);
3320
3321 /* SGE ring */
3322 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3323 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3324 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3325 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3326 }
3327
3328 /* Tx */
3329 if (!skip_tx_queue(bp, fp_index)) {
3330 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003331 for_each_cos_in_tx_queue(fp, cos) {
3332 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3333
Merav Sicron51c1a582012-03-18 10:33:38 +00003334 DP(NETIF_MSG_IFDOWN,
Joe Perches94f05b02011-08-14 12:16:20 +00003335 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003336 fp_index, cos, txdata->cid);
3337
3338 BNX2X_FREE(txdata->tx_buf_ring);
3339 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3340 txdata->tx_desc_mapping,
3341 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3342 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003343 }
3344 /* end of fastpath */
3345}
3346
3347void bnx2x_free_fp_mem(struct bnx2x *bp)
3348{
3349 int i;
3350 for_each_queue(bp, i)
3351 bnx2x_free_fp_mem_at(bp, i);
3352}
3353
Eric Dumazet1191cb82012-04-27 21:39:21 +00003354static void set_sb_shortcuts(struct bnx2x *bp, int index)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003355{
3356 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003357 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003358 bnx2x_fp(bp, index, sb_index_values) =
3359 (__le16 *)status_blk.e2_sb->sb.index_values;
3360 bnx2x_fp(bp, index, sb_running_index) =
3361 (__le16 *)status_blk.e2_sb->sb.running_index;
3362 } else {
3363 bnx2x_fp(bp, index, sb_index_values) =
3364 (__le16 *)status_blk.e1x_sb->sb.index_values;
3365 bnx2x_fp(bp, index, sb_running_index) =
3366 (__le16 *)status_blk.e1x_sb->sb.running_index;
3367 }
3368}
3369
Eric Dumazet1191cb82012-04-27 21:39:21 +00003370/* Returns the number of actually allocated BDs */
3371static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3372 int rx_ring_size)
3373{
3374 struct bnx2x *bp = fp->bp;
3375 u16 ring_prod, cqe_ring_prod;
3376 int i, failure_cnt = 0;
3377
3378 fp->rx_comp_cons = 0;
3379 cqe_ring_prod = ring_prod = 0;
3380
3381 /* This routine is called only during init, so
3382 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3383 */
3384 for (i = 0; i < rx_ring_size; i++) {
3385 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3386 failure_cnt++;
3387 continue;
3388 }
3389 ring_prod = NEXT_RX_IDX(ring_prod);
3390 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3391 WARN_ON(ring_prod <= (i - failure_cnt));
3392 }
3393
3394 if (failure_cnt)
3395 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3396 i - failure_cnt, fp->index);
3397
3398 fp->rx_bd_prod = ring_prod;
3399 /* Limit the CQE producer by the CQE ring size */
3400 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3401 cqe_ring_prod);
3402 fp->rx_pkt = fp->rx_calls = 0;
3403
3404 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3405
3406 return i - failure_cnt;
3407}
3408
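/* Link the last CQE of each RCQ page to the first CQE of the following page
 * (wrapping around at the end), so the completion queue forms one contiguous
 * ring.
 */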
3409static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3410{
3411 int i;
3412
3413 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3414 struct eth_rx_cqe_next_page *nextpg;
3415
3416 nextpg = (struct eth_rx_cqe_next_page *)
3417 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3418 nextpg->addr_hi =
3419 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3420 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3421 nextpg->addr_lo =
3422 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3423 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3424 }
3425}
3426
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003427static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3428{
3429 union host_hc_status_block *sb;
3430 struct bnx2x_fastpath *fp = &bp->fp[index];
3431 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003432 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003433 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003434
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003435#ifdef BCM_CNIC
Barak Witkowskia3348722012-04-23 03:04:46 +00003436 if (!bp->rx_ring_size &&
3437 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003438 rx_ring_size = MIN_RX_SIZE_NONTPA;
3439 bp->rx_ring_size = rx_ring_size;
3440 } else
3441#endif
David S. Miller8decf862011-09-22 03:23:13 -04003442 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003443 u32 cfg = SHMEM_RD(bp,
3444 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003445
David S. Miller8decf862011-09-22 03:23:13 -04003446 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3447
Mintz Yuvald760fc32012-02-15 02:10:28 +00003448 /* Decrease ring size for 1G functions */
3449 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3450 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3451 rx_ring_size /= 10;
3452
David S. Miller8decf862011-09-22 03:23:13 -04003453 /* allocate at least number of buffers required by FW */
3454 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3455 MIN_RX_SIZE_TPA, rx_ring_size);
3456
3457 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003458 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003459 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003460
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003461 /* Common */
3462 sb = &bnx2x_fp(bp, index, status_blk);
3463#ifdef BCM_CNIC
3464 if (!IS_FCOE_IDX(index)) {
3465#endif
3466 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003467 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003468 BNX2X_PCI_ALLOC(sb->e2_sb,
3469 &bnx2x_fp(bp, index, status_blk_mapping),
3470 sizeof(struct host_hc_status_block_e2));
3471 else
3472 BNX2X_PCI_ALLOC(sb->e1x_sb,
3473 &bnx2x_fp(bp, index, status_blk_mapping),
3474 sizeof(struct host_hc_status_block_e1x));
3475#ifdef BCM_CNIC
3476 }
3477#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003478
3479 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3480 * set shortcuts for it.
3481 */
3482 if (!IS_FCOE_IDX(index))
3483 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003484
3485 /* Tx */
3486 if (!skip_tx_queue(bp, index)) {
3487 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003488 for_each_cos_in_tx_queue(fp, cos) {
3489 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3490
Merav Sicron51c1a582012-03-18 10:33:38 +00003491 DP(NETIF_MSG_IFUP,
3492 "allocating tx memory of fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003493 index, cos);
3494
3495 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003496 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003497 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3498 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003499 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003500 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003501 }
3502
3503 /* Rx */
3504 if (!skip_rx_queue(bp, index)) {
3505 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3506 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3507 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3508 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3509 &bnx2x_fp(bp, index, rx_desc_mapping),
3510 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3511
3512 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3513 &bnx2x_fp(bp, index, rx_comp_mapping),
3514 sizeof(struct eth_fast_path_rx_cqe) *
3515 NUM_RCQ_BD);
3516
3517 /* SGE ring */
3518 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3519 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3520 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3521 &bnx2x_fp(bp, index, rx_sge_mapping),
3522 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3523 /* RX BD ring */
3524 bnx2x_set_next_page_rx_bd(fp);
3525
3526 /* CQ ring */
3527 bnx2x_set_next_page_rx_cq(fp);
3528
3529 /* BDs */
3530 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3531 if (ring_size < rx_ring_size)
3532 goto alloc_mem_err;
3533 }
3534
3535 return 0;
3536
3537/* handles low memory cases */
3538alloc_mem_err:
3539 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3540 index, ring_size);
3541 /* FW will drop all packets if the queue is not big enough;
3542 * In these cases we disable the queue
Ariel Elior6383c0b2011-07-14 08:31:57 +00003543 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003544 */
3545 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003546 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003547 /* release memory allocated for this queue */
3548 bnx2x_free_fp_mem_at(bp, index);
3549 return -ENOMEM;
3550 }
3551 return 0;
3552}
3553
3554int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3555{
3556 int i;
3557
3558 /**
3559 * 1. Allocate FP for leading - fatal if error
3560 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003561 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3562 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003563 */
3564
3565 /* leading */
3566 if (bnx2x_alloc_fp_mem_at(bp, 0))
3567 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003568
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003569#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003570 if (!NO_FCOE(bp))
3571 /* FCoE */
3572 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3573 /* we will fail load process instead of mark
3574 * NO_FCOE_FLAG
3575 */
3576 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003577#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003578
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003579 /* RSS */
3580 for_each_nondefault_eth_queue(bp, i)
3581 if (bnx2x_alloc_fp_mem_at(bp, i))
3582 break;
3583
3584 /* handle memory failures */
3585 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3586 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3587
3588 WARN_ON(delta < 0);
3589#ifdef BCM_CNIC
3590 /**
3591 * move non eth FPs next to last eth FP
3592 * must be done in that order
3593 * FCOE_IDX < FWD_IDX < OOO_IDX
3594 */
3595
Ariel Elior6383c0b2011-07-14 08:31:57 +00003596 /* move the FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003597 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3598#endif
3599 bp->num_queues -= delta;
3600 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3601 bp->num_queues + delta, bp->num_queues);
3602 }
3603
3604 return 0;
3605}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003606
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003607void bnx2x_free_mem_bp(struct bnx2x *bp)
3608{
3609 kfree(bp->fp);
3610 kfree(bp->msix_table);
3611 kfree(bp->ilt);
3612}
3613
3614int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3615{
3616 struct bnx2x_fastpath *fp;
3617 struct msix_entry *tbl;
3618 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003619 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003620
Ariel Elior6383c0b2011-07-14 08:31:57 +00003621 /*
3622 * The biggest MSI-X table we might need is the maximum number of fast
3623 * path IGU SBs plus the default SB (for the PF).
3624 */
3625 msix_table_size = bp->igu_sb_cnt + 1;
3626
3627 /* fp array: RSS plus CNIC related L2 queues */
Thomas Meyer01e23742011-11-29 11:08:00 +00003628 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003629 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003630 if (!fp)
3631 goto alloc_err;
3632 bp->fp = fp;
3633
3634 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003635 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003636 if (!tbl)
3637 goto alloc_err;
3638 bp->msix_table = tbl;
3639
3640 /* ilt */
3641 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3642 if (!ilt)
3643 goto alloc_err;
3644 bp->ilt = ilt;
3645
3646 return 0;
3647alloc_err:
3648 bnx2x_free_mem_bp(bp);
3649 return -ENOMEM;
3650
3651}
3652
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003653int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003654{
3655 struct bnx2x *bp = netdev_priv(dev);
3656
3657 if (unlikely(!netif_running(dev)))
3658 return 0;
3659
3660 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3661 return bnx2x_nic_load(bp, LOAD_NORMAL);
3662}
3663
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003664int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3665{
3666 u32 sel_phy_idx = 0;
3667 if (bp->link_params.num_phys <= 1)
3668 return INT_PHY;
3669
3670 if (bp->link_vars.link_up) {
3671 sel_phy_idx = EXT_PHY1;
3672 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3673 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3674 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3675 sel_phy_idx = EXT_PHY2;
3676 } else {
3677
3678 switch (bnx2x_phy_selection(&bp->link_params)) {
3679 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3680 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3681 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3682 sel_phy_idx = EXT_PHY1;
3683 break;
3684 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3685 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3686 sel_phy_idx = EXT_PHY2;
3687 break;
3688 }
3689 }
3690
3691 return sel_phy_idx;
3692
3693}
3694int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3695{
3696 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3697 /*
3698 * The selected active PHY is always after swapping (in case PHY
3699 * swapping is enabled). So when swapping is enabled, we need to reverse
3700 * the configuration
3701 */
3702
3703 if (bp->link_params.multi_phy_config &
3704 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3705 if (sel_phy_idx == EXT_PHY1)
3706 sel_phy_idx = EXT_PHY2;
3707 else if (sel_phy_idx == EXT_PHY2)
3708 sel_phy_idx = EXT_PHY1;
3709 }
3710 return LINK_CONFIG_IDX(sel_phy_idx);
3711}
3712
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003713#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3714int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3715{
3716 struct bnx2x *bp = netdev_priv(dev);
3717 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3718
3719 switch (type) {
3720 case NETDEV_FCOE_WWNN:
3721 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3722 cp->fcoe_wwn_node_name_lo);
3723 break;
3724 case NETDEV_FCOE_WWPN:
3725 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3726 cp->fcoe_wwn_port_name_lo);
3727 break;
3728 default:
Merav Sicron51c1a582012-03-18 10:33:38 +00003729 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003730 return -EINVAL;
3731 }
3732
3733 return 0;
3734}
3735#endif
3736
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003737/* called with rtnl_lock */
3738int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3739{
3740 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003741
3742 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003743 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003744 return -EAGAIN;
3745 }
3746
3747 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
Merav Sicron51c1a582012-03-18 10:33:38 +00003748 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3749 BNX2X_ERR("Can't support requested MTU size\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003750 return -EINVAL;
Merav Sicron51c1a582012-03-18 10:33:38 +00003751 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003752
3753 /* This does not race with packet allocation
3754 * because the actual alloc size is
3755 * only updated as part of load
3756 */
3757 dev->mtu = new_mtu;
3758
Michał Mirosław66371c42011-04-12 09:38:23 +00003759 return bnx2x_reload_if_running(dev);
3760}
3761
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003762netdev_features_t bnx2x_fix_features(struct net_device *dev,
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003763 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003764{
3765 struct bnx2x *bp = netdev_priv(dev);
3766
3767 /* TPA requires Rx CSUM offloading */
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003768 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003769 features &= ~NETIF_F_LRO;
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003770 features &= ~NETIF_F_GRO;
3771 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003772
3773 return features;
3774}
3775
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003776int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003777{
3778 struct bnx2x *bp = netdev_priv(dev);
3779 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003780 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003781
3782 if (features & NETIF_F_LRO)
3783 flags |= TPA_ENABLE_FLAG;
3784 else
3785 flags &= ~TPA_ENABLE_FLAG;
3786
Dmitry Kravkov621b4d62012-02-20 09:59:08 +00003787 if (features & NETIF_F_GRO)
3788 flags |= GRO_ENABLE_FLAG;
3789 else
3790 flags &= ~GRO_ENABLE_FLAG;
3791
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003792 if (features & NETIF_F_LOOPBACK) {
3793 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3794 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3795 bnx2x_reload = true;
3796 }
3797 } else {
3798 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3799 bp->link_params.loopback_mode = LOOPBACK_NONE;
3800 bnx2x_reload = true;
3801 }
3802 }
3803
Michał Mirosław66371c42011-04-12 09:38:23 +00003804 if (flags ^ bp->flags) {
3805 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003806 bnx2x_reload = true;
3807 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003808
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003809 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003810 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3811 return bnx2x_reload_if_running(dev);
3812 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003813 }
3814
Michał Mirosław66371c42011-04-12 09:38:23 +00003815 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003816}
3817
3818void bnx2x_tx_timeout(struct net_device *dev)
3819{
3820 struct bnx2x *bp = netdev_priv(dev);
3821
3822#ifdef BNX2X_STOP_ON_ERROR
3823 if (!bp->panic)
3824 bnx2x_panic();
3825#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003826
3827 smp_mb__before_clear_bit();
3828 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3829 smp_mb__after_clear_bit();
3830
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003831 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003832 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003833}
3834
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003835int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3836{
3837 struct net_device *dev = pci_get_drvdata(pdev);
3838 struct bnx2x *bp;
3839
3840 if (!dev) {
3841 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3842 return -ENODEV;
3843 }
3844 bp = netdev_priv(dev);
3845
3846 rtnl_lock();
3847
3848 pci_save_state(pdev);
3849
3850 if (!netif_running(dev)) {
3851 rtnl_unlock();
3852 return 0;
3853 }
3854
3855 netif_device_detach(dev);
3856
3857 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3858
3859 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3860
3861 rtnl_unlock();
3862
3863 return 0;
3864}
3865
3866int bnx2x_resume(struct pci_dev *pdev)
3867{
3868 struct net_device *dev = pci_get_drvdata(pdev);
3869 struct bnx2x *bp;
3870 int rc;
3871
3872 if (!dev) {
3873 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3874 return -ENODEV;
3875 }
3876 bp = netdev_priv(dev);
3877
3878 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Merav Sicron51c1a582012-03-18 10:33:38 +00003879 BNX2X_ERR("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003880 return -EAGAIN;
3881 }
3882
3883 rtnl_lock();
3884
3885 pci_restore_state(pdev);
3886
3887 if (!netif_running(dev)) {
3888 rtnl_unlock();
3889 return 0;
3890 }
3891
3892 bnx2x_set_power_state(bp, PCI_D0);
3893 netif_device_attach(dev);
3894
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003895 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3896
3897 rtnl_unlock();
3898
3899 return rc;
3900}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003901
3902
3903void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3904 u32 cid)
3905{
3906 /* ustorm cxt validation */
3907 cxt->ustorm_ag_context.cdu_usage =
3908 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3909 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3910 /* xcontext validation */
3911 cxt->xstorm_ag_context.cdu_reserved =
3912 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3913 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3914}
3915
Eric Dumazet1191cb82012-04-27 21:39:21 +00003916static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3917 u8 fw_sb_id, u8 sb_index,
3918 u8 ticks)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003919{
3920
3921 u32 addr = BAR_CSTRORM_INTMEM +
3922 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3923 REG_WR8(bp, addr, ticks);
Merav Sicron51c1a582012-03-18 10:33:38 +00003924 DP(NETIF_MSG_IFUP,
3925 "port %x fw_sb_id %d sb_index %d ticks %d\n",
3926 port, fw_sb_id, sb_index, ticks);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003927}
3928
Eric Dumazet1191cb82012-04-27 21:39:21 +00003929static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3930 u16 fw_sb_id, u8 sb_index,
3931 u8 disable)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003932{
3933 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3934 u32 addr = BAR_CSTRORM_INTMEM +
3935 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3936 u16 flags = REG_RD16(bp, addr);
3937 /* clear and set */
3938 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3939 flags |= enable_flag;
3940 REG_WR16(bp, addr, flags);
Merav Sicron51c1a582012-03-18 10:33:38 +00003941 DP(NETIF_MSG_IFUP,
3942 "port %x fw_sb_id %d sb_index %d disable %d\n",
3943 port, fw_sb_id, sb_index, disable);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003944}
3945
3946void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3947 u8 sb_index, u8 disable, u16 usec)
3948{
3949 int port = BP_PORT(bp);
3950 u8 ticks = usec / BNX2X_BTR;
3951
3952 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3953
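	/* a usec value of 0 also disables coalescing on this index */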
3954 disable = disable ? 1 : (usec ? 0 : 1);
3955 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3956}