Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Ariel Elior85b26ea2012-01-26 06:01:54 +00003 * Copyright (c) 2007-2012 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Joe Perchesf1deab52011-08-14 12:16:21 +000018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000021#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000022#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000023#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000024#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070025#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000026#include <linux/firmware.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000027#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000028#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000029#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000030#include "bnx2x_sp.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000031
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030032
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000033
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000034/**
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000035 * bnx2x_move_fp - move content of the fastpath structure.
36 *
37 * @bp: driver handle
38 * @from: source FP index
39 * @to: destination FP index
40 *
 41 * Makes sure the contents of bp->fp[to].napi are kept
Ariel Elior72754082011-11-13 04:34:31 +000042 * intact. This is done by first copying the napi struct from
43 * the target to the source, and then mem copying the entire
44 * source onto the target
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000045 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
Ariel Elior72754082011-11-13 04:34:31 +000050
51 /* Copy the NAPI object as it has been already initialized */
52 from_fp->napi = to_fp->napi;
53
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000054 /* Move bnx2x_fastpath contents */
55 memcpy(to_fp, from_fp, sizeof(*to_fp));
56 to_fp->index = to;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000057}
58
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030059int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
60
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000061/* free skb in the packet ring at pos idx
62 * return idx of last bd freed
63 */
Ariel Elior6383c0b2011-07-14 08:31:57 +000064static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +000065 u16 idx, unsigned int *pkts_compl,
66 unsigned int *bytes_compl)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000067{
Ariel Elior6383c0b2011-07-14 08:31:57 +000068 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000069 struct eth_tx_start_bd *tx_start_bd;
70 struct eth_tx_bd *tx_data_bd;
71 struct sk_buff *skb = tx_buf->skb;
72 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
73 int nbd;
74
75 /* prefetch skb end pointer to speedup dev_kfree_skb() */
76 prefetch(&skb->end);
77
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030078 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +000079 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000080
81 /* unmap first bd */
82 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +000083 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000084 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +000085 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000086
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030087
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000088 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
89#ifdef BNX2X_STOP_ON_ERROR
90 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
91 BNX2X_ERR("BAD nbd!\n");
92 bnx2x_panic();
93 }
94#endif
95 new_cons = nbd + tx_buf->first_bd;
96
97 /* Get the next bd */
98 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
99
100 /* Skip a parse bd... */
101 --nbd;
102 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
103
104 /* ...and the TSO split header bd since they have no mapping */
105 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
106 --nbd;
107 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
108 }
109
110 /* now free frags */
111 while (nbd > 0) {
112
113 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000114 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000115 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
116 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
117 if (--nbd)
118 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
119 }
120
121 /* release skb */
122 WARN_ON(!skb);
Tom Herbert2df1a702011-11-28 16:33:37 +0000123 if (skb) {
124 (*pkts_compl)++;
125 (*bytes_compl) += skb->len;
126 }
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000127 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000128 tx_buf->first_bd = 0;
129 tx_buf->skb = NULL;
130
131 return new_cons;
132}
133
Ariel Elior6383c0b2011-07-14 08:31:57 +0000134int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000135{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000136 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000137 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Tom Herbert2df1a702011-11-28 16:33:37 +0000138 unsigned int pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000139
140#ifdef BNX2X_STOP_ON_ERROR
141 if (unlikely(bp->panic))
142 return -1;
143#endif
144
Ariel Elior6383c0b2011-07-14 08:31:57 +0000145 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
146 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
147 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000148
149 while (sw_cons != hw_cons) {
150 u16 pkt_cons;
151
152 pkt_cons = TX_BD(sw_cons);
153
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000154 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
155 " pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000156 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000157
Tom Herbert2df1a702011-11-28 16:33:37 +0000158 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
159 &pkts_compl, &bytes_compl);
160
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000161 sw_cons++;
162 }
163
Tom Herbert2df1a702011-11-28 16:33:37 +0000164 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
165
Ariel Elior6383c0b2011-07-14 08:31:57 +0000166 txdata->tx_pkt_cons = sw_cons;
167 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000168
169 /* Need to make the tx_bd_cons update visible to start_xmit()
170 * before checking for netif_tx_queue_stopped(). Without the
171 * memory barrier, there is a small possibility that
172 * start_xmit() will miss it and cause the queue to be stopped
173 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300174 * On the other hand we need an rmb() here to ensure the proper
175 * ordering of bit testing in the following
176 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000177 */
178 smp_mb();
179
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000180 if (unlikely(netif_tx_queue_stopped(txq))) {
181 /* Taking tx_lock() is needed to prevent reenabling the queue
 182 * while it's empty. This could happen if rx_action() gets
183 * suspended in bnx2x_tx_int() after the condition before
184 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
185 *
186 * stops the queue->sees fresh tx_bd_cons->releases the queue->
187 * sends some packets consuming the whole queue again->
188 * stops the queue
189 */
190
191 __netif_tx_lock(txq, smp_processor_id());
192
193 if ((netif_tx_queue_stopped(txq)) &&
194 (bp->state == BNX2X_STATE_OPEN) &&
Ariel Elior6383c0b2011-07-14 08:31:57 +0000195 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000196 netif_tx_wake_queue(txq);
197
198 __netif_tx_unlock(txq);
199 }
200 return 0;
201}
202
203static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
204 u16 idx)
205{
206 u16 last_max = fp->last_max_sge;
207
208 if (SUB_S16(idx, last_max) > 0)
209 fp->last_max_sge = idx;
210}
211
212static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
213 struct eth_fast_path_rx_cqe *fp_cqe)
214{
215 struct bnx2x *bp = fp->bp;
216 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
217 le16_to_cpu(fp_cqe->len_on_bd)) >>
218 SGE_PAGE_SHIFT;
219 u16 last_max, last_elem, first_elem;
220 u16 delta = 0;
221 u16 i;
222
223 if (!sge_len)
224 return;
225
226 /* First mark all used pages */
227 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300228 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000229 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000230
231 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000232 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000233
234 /* Here we assume that the last SGE index is the biggest */
235 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000236 bnx2x_update_last_max_sge(fp,
237 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000238
239 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300240 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
241 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000242
243 /* If ring is not full */
244 if (last_elem + 1 != first_elem)
245 last_elem++;
246
247 /* Now update the prod */
248 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
249 if (likely(fp->sge_mask[i]))
250 break;
251
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300252 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
253 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000254 }
255
256 if (delta > 0) {
257 fp->rx_sge_prod += delta;
258 /* clear page-end entries */
259 bnx2x_clear_sge_mask_next_elems(fp);
260 }
261
262 DP(NETIF_MSG_RX_STATUS,
263 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
264 fp->last_max_sge, fp->rx_sge_prod);
265}
266
Eric Dumazete52fcb22011-11-14 06:05:34 +0000267/* Set Toeplitz hash value in the skb using the value from the
268 * CQE (calculated by HW).
269 */
270static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
271 const struct eth_fast_path_rx_cqe *cqe)
272{
273 /* Set Toeplitz hash from CQE */
274 if ((bp->dev->features & NETIF_F_RXHASH) &&
275 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
276 return le32_to_cpu(cqe->rss_hash_result);
277 return 0;
278}
279
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000280static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000281 u16 cons, u16 prod,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300282 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000283{
284 struct bnx2x *bp = fp->bp;
285 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
286 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
287 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
288 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300289 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
290 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000291
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300292 /* print error if current state != stop */
293 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000294 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
295
Eric Dumazete52fcb22011-11-14 06:05:34 +0000296 /* Try to map an empty data buffer from the aggregation info */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300297 mapping = dma_map_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000298 first_buf->data + NET_SKB_PAD,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300299 fp->rx_buf_size, DMA_FROM_DEVICE);
300 /*
301 * ...if it fails - move the skb from the consumer to the producer
302 * and set the current aggregation state as ERROR to drop it
303 * when TPA_STOP arrives.
304 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000305
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300306 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
307 /* Move the BD from the consumer to the producer */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000308 bnx2x_reuse_rx_data(fp, cons, prod);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300309 tpa_info->tpa_state = BNX2X_TPA_ERROR;
310 return;
311 }
312
Eric Dumazete52fcb22011-11-14 06:05:34 +0000313 /* move empty data from pool to prod */
314 prod_rx_buf->data = first_buf->data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300315 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000316 /* point prod_bd to new data */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000317 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
318 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
319
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300320 /* move partial skb from cons to pool (don't unmap yet) */
321 *first_buf = *cons_rx_buf;
322
323 /* mark bin state as START */
324 tpa_info->parsing_flags =
325 le16_to_cpu(cqe->pars_flags.flags);
326 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
327 tpa_info->tpa_state = BNX2X_TPA_START;
328 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
329 tpa_info->placement_offset = cqe->placement_offset;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000330 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300331
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000332#ifdef BNX2X_STOP_ON_ERROR
333 fp->tpa_queue_used |= (1 << queue);
334#ifdef _ASM_GENERIC_INT_L64_H
335 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
336#else
337 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
338#endif
339 fp->tpa_queue_used);
340#endif
341}
342
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000343/* Timestamp option length allowed for TPA aggregation:
344 *
345 * nop nop kind length echo val
346 */
347#define TPA_TSTAMP_OPT_LEN 12
348/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000349 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000350 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000351 * @bp: driver handle
352 * @parsing_flags: parsing flags from the START CQE
353 * @len_on_bd: total length of the first packet for the
354 * aggregation.
355 *
356 * Approximate value of the MSS for this aggregation calculated using
357 * the first packet of it.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000358 */
359static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
360 u16 len_on_bd)
361{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300362 /*
 363 * TPA aggregation won't have either IP options or TCP options
364 * other than timestamp or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000365 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300366 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
367
368 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
369 PRS_FLAG_OVERETH_IPV6)
370 hdrs_len += sizeof(struct ipv6hdr);
371 else /* IPv4 */
372 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000373
374
 375 /* Check if there was a TCP timestamp; if there is, it will
 376 * always be 12 bytes long: nop nop kind length echo val.
377 *
378 * Otherwise FW would close the aggregation.
379 */
380 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
381 hdrs_len += TPA_TSTAMP_OPT_LEN;
382
383 return len_on_bd - hdrs_len;
384}
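/* A worked example of the arithmetic above (illustrative numbers): for an
 * IPv4 aggregation whose packets carry the TCP timestamp option,
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 *            sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so a first packet with len_on_bd == 1514 gives an approximate MSS of
 * 1514 - 66 = 1448.
 */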
385
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000386static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300387 u16 queue, struct sk_buff *skb,
388 struct eth_end_agg_rx_cqe *cqe,
389 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000390{
391 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000392 u32 i, frag_len, frag_size, pages;
393 int err;
394 int j;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300395 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
396 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000397
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300398 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000399 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
400
401 /* This is needed in order to enable forwarding support */
402 if (frag_size)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300403 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
404 tpa_info->parsing_flags, len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000405
406#ifdef BNX2X_STOP_ON_ERROR
407 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
408 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
409 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300410 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000411 bnx2x_panic();
412 return -EINVAL;
413 }
414#endif
415
416 /* Run through the SGL and compose the fragmented skb */
417 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300418 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000419
420 /* FW gives the indices of the SGE as if the ring is an array
421 (meaning that "next" element will consume 2 indices) */
422 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
423 rx_pg = &fp->rx_page_ring[sge_idx];
424 old_rx_pg = *rx_pg;
425
426 /* If we fail to allocate a substitute page, we simply stop
427 where we are and drop the whole packet */
428 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
429 if (unlikely(err)) {
430 fp->eth_q_stats.rx_skb_alloc_failed++;
431 return err;
432 }
433
 434 /* Unmap the page as we are going to pass it to the stack */
435 dma_unmap_page(&bp->pdev->dev,
436 dma_unmap_addr(&old_rx_pg, mapping),
437 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
438
439 /* Add one frag and update the appropriate fields in the skb */
440 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
441
442 skb->data_len += frag_len;
Eric Dumazete1ac50f2011-10-19 23:00:23 +0000443 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000444 skb->len += frag_len;
445
446 frag_size -= frag_len;
447 }
448
449 return 0;
450}
451
452static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300453 u16 queue, struct eth_end_agg_rx_cqe *cqe,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000454 u16 cqe_idx)
455{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300456 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
457 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000458 u32 pad = tpa_info->placement_offset;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300459 u16 len = tpa_info->len_on_bd;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000460 struct sk_buff *skb = NULL;
461 u8 *data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000462 /* alloc new skb */
Eric Dumazete52fcb22011-11-14 06:05:34 +0000463 u8 *new_data;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300464 u8 old_tpa_state = tpa_info->tpa_state;
465
466 tpa_info->tpa_state = BNX2X_TPA_STOP;
467
 468 /* If there was an error during the handling of the TPA_START -
469 * drop this aggregation.
470 */
471 if (old_tpa_state == BNX2X_TPA_ERROR)
472 goto drop;
473
Eric Dumazete52fcb22011-11-14 06:05:34 +0000474 /* Try to allocate the new data */
475 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000476
477 /* Unmap skb in the pool anyway, as we are going to change
478 pool entry status to BNX2X_TPA_STOP even if new skb allocation
479 fails. */
480 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800481 fp->rx_buf_size, DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000482 if (likely(new_data))
483 skb = build_skb(data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000484
Eric Dumazete52fcb22011-11-14 06:05:34 +0000485 if (likely(skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000486#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800487 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000488 BNX2X_ERR("skb_put is about to fail... "
489 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800490 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000491 bnx2x_panic();
492 return;
493 }
494#endif
495
Eric Dumazete52fcb22011-11-14 06:05:34 +0000496 skb_reserve(skb, pad + NET_SKB_PAD);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000497 skb_put(skb, len);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000498 skb->rxhash = tpa_info->rxhash;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000499
500 skb->protocol = eth_type_trans(skb, bp->dev);
501 skb->ip_summed = CHECKSUM_UNNECESSARY;
502
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300503 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
504 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
505 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
Hao Zheng9bcc0892010-10-20 13:56:11 +0000506 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000507 } else {
508 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
509 " - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000510 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000511 }
512
513
Eric Dumazete52fcb22011-11-14 06:05:34 +0000514 /* put new data in bin */
515 rx_buf->data = new_data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000516
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300517 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000518 }
Jesper Juhl3f61cd82012-02-06 11:28:21 +0000519 kfree(new_data);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300520drop:
521 /* drop the packet and keep the buffer in the bin */
522 DP(NETIF_MSG_RX_STATUS,
523 "Failed to allocate or map a new skb - dropping packet!\n");
524 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000525}
526
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000527
528int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
529{
530 struct bnx2x *bp = fp->bp;
531 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
532 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
533 int rx_pkt = 0;
534
535#ifdef BNX2X_STOP_ON_ERROR
536 if (unlikely(bp->panic))
537 return 0;
538#endif
539
540 /* CQ "next element" is of the size of the regular element,
541 that's why it's ok here */
542 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
543 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
544 hw_comp_cons++;
545
546 bd_cons = fp->rx_bd_cons;
547 bd_prod = fp->rx_bd_prod;
548 bd_prod_fw = bd_prod;
549 sw_comp_cons = fp->rx_comp_cons;
550 sw_comp_prod = fp->rx_comp_prod;
551
552 /* Memory barrier necessary as speculative reads of the rx
553 * buffer can be ahead of the index in the status block
554 */
555 rmb();
556
557 DP(NETIF_MSG_RX_STATUS,
558 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
559 fp->index, hw_comp_cons, sw_comp_cons);
560
561 while (sw_comp_cons != hw_comp_cons) {
562 struct sw_rx_bd *rx_buf = NULL;
563 struct sk_buff *skb;
564 union eth_rx_cqe *cqe;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300565 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000566 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300567 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000568 u16 len, pad;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000569 u8 *data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000570
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300571#ifdef BNX2X_STOP_ON_ERROR
572 if (unlikely(bp->panic))
573 return 0;
574#endif
575
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000576 comp_ring_cons = RCQ_BD(sw_comp_cons);
577 bd_prod = RX_BD(bd_prod);
578 bd_cons = RX_BD(bd_cons);
579
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000580 cqe = &fp->rx_comp_ring[comp_ring_cons];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300581 cqe_fp = &cqe->fast_path_cqe;
582 cqe_fp_flags = cqe_fp->type_error_flags;
583 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000584
585 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
586 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300587 cqe_fp_flags, cqe_fp->status_flags,
588 le32_to_cpu(cqe_fp->rss_hash_result),
589 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000590
591 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300592 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000593 bnx2x_sp_event(fp, cqe);
594 goto next_cqe;
Eric Dumazete52fcb22011-11-14 06:05:34 +0000595 }
596 rx_buf = &fp->rx_buf_ring[bd_cons];
597 data = rx_buf->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000598
Eric Dumazete52fcb22011-11-14 06:05:34 +0000599 if (!CQE_TYPE_FAST(cqe_fp_type)) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300600#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000601 /* sanity check */
602 if (fp->disable_tpa &&
603 (CQE_TYPE_START(cqe_fp_type) ||
604 CQE_TYPE_STOP(cqe_fp_type)))
605 BNX2X_ERR("START/STOP packet while "
606 "disable_tpa type %x\n",
607 CQE_TYPE(cqe_fp_type));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300608#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000609
Eric Dumazete52fcb22011-11-14 06:05:34 +0000610 if (CQE_TYPE_START(cqe_fp_type)) {
611 u16 queue = cqe_fp->queue_index;
612 DP(NETIF_MSG_RX_STATUS,
613 "calling tpa_start on queue %d\n",
614 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000615
Eric Dumazete52fcb22011-11-14 06:05:34 +0000616 bnx2x_tpa_start(fp, queue,
617 bd_cons, bd_prod,
618 cqe_fp);
619 goto next_rx;
620 } else {
621 u16 queue =
622 cqe->end_agg_cqe.queue_index;
623 DP(NETIF_MSG_RX_STATUS,
624 "calling tpa_stop on queue %d\n",
625 queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000626
Eric Dumazete52fcb22011-11-14 06:05:34 +0000627 bnx2x_tpa_stop(bp, fp, queue,
628 &cqe->end_agg_cqe,
629 comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000630#ifdef BNX2X_STOP_ON_ERROR
Eric Dumazete52fcb22011-11-14 06:05:34 +0000631 if (bp->panic)
632 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000633#endif
634
Eric Dumazete52fcb22011-11-14 06:05:34 +0000635 bnx2x_update_sge_prod(fp, cqe_fp);
636 goto next_cqe;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000637 }
Eric Dumazete52fcb22011-11-14 06:05:34 +0000638 }
639 /* non TPA */
640 len = le16_to_cpu(cqe_fp->pkt_len);
641 pad = cqe_fp->placement_offset;
642 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000643 dma_unmap_addr(rx_buf, mapping),
Eric Dumazete52fcb22011-11-14 06:05:34 +0000644 pad + RX_COPY_THRESH,
645 DMA_FROM_DEVICE);
646 pad += NET_SKB_PAD;
647 prefetch(data + pad); /* speedup eth_type_trans() */
648 /* is this an error packet? */
649 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
650 DP(NETIF_MSG_RX_ERR,
651 "ERROR flags %x rx packet %u\n",
652 cqe_fp_flags, sw_comp_cons);
653 fp->eth_q_stats.rx_err_discard_pkt++;
654 goto reuse_rx;
655 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000656
Eric Dumazete52fcb22011-11-14 06:05:34 +0000657 /* Since we don't have a jumbo ring
658 * copy small packets if mtu > 1500
659 */
660 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
661 (len <= RX_COPY_THRESH)) {
662 skb = netdev_alloc_skb_ip_align(bp->dev, len);
663 if (skb == NULL) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000664 DP(NETIF_MSG_RX_ERR,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000665 "ERROR packet dropped because of alloc failure\n");
666 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000667 goto reuse_rx;
668 }
Eric Dumazete52fcb22011-11-14 06:05:34 +0000669 memcpy(skb->data, data + pad, len);
670 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
671 } else {
672 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000673 dma_unmap_single(&bp->pdev->dev,
Eric Dumazete52fcb22011-11-14 06:05:34 +0000674 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800675 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000676 DMA_FROM_DEVICE);
Eric Dumazete52fcb22011-11-14 06:05:34 +0000677 skb = build_skb(data);
678 if (unlikely(!skb)) {
679 kfree(data);
680 fp->eth_q_stats.rx_skb_alloc_failed++;
681 goto next_rx;
682 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000683 skb_reserve(skb, pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000684 } else {
685 DP(NETIF_MSG_RX_ERR,
686 "ERROR packet dropped because "
687 "of alloc failure\n");
688 fp->eth_q_stats.rx_skb_alloc_failed++;
689reuse_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000690 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000691 goto next_rx;
692 }
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000693 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000694
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000695 skb_put(skb, len);
696 skb->protocol = eth_type_trans(skb, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000697
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000698 /* Set Toeplitz hash for a non-LRO skb */
699 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000700
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000701 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000702
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000703 if (bp->dev->features & NETIF_F_RXCSUM) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300704
Dmitry Kravkov036d2df2011-12-12 23:40:53 +0000705 if (likely(BNX2X_RX_CSUM_OK(cqe)))
706 skb->ip_summed = CHECKSUM_UNNECESSARY;
707 else
708 fp->eth_q_stats.hw_csum_err++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000709 }
710
Dmitry Kravkovf233caf2011-11-13 04:34:22 +0000711 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000712
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300713 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
714 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000715 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300716 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000717 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000718
719
720next_rx:
Eric Dumazete52fcb22011-11-14 06:05:34 +0000721 rx_buf->data = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000722
723 bd_cons = NEXT_RX_IDX(bd_cons);
724 bd_prod = NEXT_RX_IDX(bd_prod);
725 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
726 rx_pkt++;
727next_cqe:
728 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
729 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
730
731 if (rx_pkt == budget)
732 break;
733 } /* while */
734
735 fp->rx_bd_cons = bd_cons;
736 fp->rx_bd_prod = bd_prod_fw;
737 fp->rx_comp_cons = sw_comp_cons;
738 fp->rx_comp_prod = sw_comp_prod;
739
740 /* Update producers */
741 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
742 fp->rx_sge_prod);
743
744 fp->rx_pkt += rx_pkt;
745 fp->rx_calls++;
746
747 return rx_pkt;
748}
749
750static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
751{
752 struct bnx2x_fastpath *fp = fp_cookie;
753 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000754 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000755
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000756 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
757 "[fp %d fw_sd %d igusb %d]\n",
758 fp->index, fp->fw_sb_id, fp->igu_sb_id);
759 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000760
761#ifdef BNX2X_STOP_ON_ERROR
762 if (unlikely(bp->panic))
763 return IRQ_HANDLED;
764#endif
765
766 /* Handle Rx and Tx according to MSI-X vector */
767 prefetch(fp->rx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000768
769 for_each_cos_in_tx_queue(fp, cos)
770 prefetch(fp->txdata[cos].tx_cons_sb);
771
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000772 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000773 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
774
775 return IRQ_HANDLED;
776}
777
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000778/* HW Lock for shared dual port PHYs */
779void bnx2x_acquire_phy_lock(struct bnx2x *bp)
780{
781 mutex_lock(&bp->port.phy_mutex);
782
783 if (bp->port.need_hw_lock)
784 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
785}
786
787void bnx2x_release_phy_lock(struct bnx2x *bp)
788{
789 if (bp->port.need_hw_lock)
790 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
791
792 mutex_unlock(&bp->port.phy_mutex);
793}
794
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800795/* calculates MF speed according to current linespeed and MF configuration */
796u16 bnx2x_get_mf_speed(struct bnx2x *bp)
797{
798 u16 line_speed = bp->link_vars.line_speed;
799 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000800 u16 maxCfg = bnx2x_extract_max_cfg(bp,
801 bp->mf_config[BP_VN(bp)]);
802
803 /* Calculate the current MAX line speed limit for the MF
804 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800805 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000806 if (IS_MF_SI(bp))
807 line_speed = (line_speed * maxCfg) / 100;
808 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800809 u16 vn_max_rate = maxCfg * 100;
810
811 if (vn_max_rate < line_speed)
812 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000813 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800814 }
815
816 return line_speed;
817}
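/* A worked example of the calculation above (illustrative numbers): with a
 * 10000 Mbps link and a maximum bandwidth configuration (maxCfg) of 30, an
 * SI function reports 10000 * 30 / 100 = 3000 Mbps, while an SD function is
 * capped at vn_max_rate = 30 * 100 = 3000 Mbps.
 */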
818
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000819/**
820 * bnx2x_fill_report_data - fill link report data to report
821 *
822 * @bp: driver handle
823 * @data: link state to update
824 *
 825 * It uses non-atomic bit operations because it is called under the mutex.
826 */
827static inline void bnx2x_fill_report_data(struct bnx2x *bp,
828 struct bnx2x_link_report_data *data)
829{
830 u16 line_speed = bnx2x_get_mf_speed(bp);
831
832 memset(data, 0, sizeof(*data));
833
 834 /* Fill the report data: effective line speed */
835 data->line_speed = line_speed;
836
837 /* Link is down */
838 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
839 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
840 &data->link_report_flags);
841
842 /* Full DUPLEX */
843 if (bp->link_vars.duplex == DUPLEX_FULL)
844 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
845
846 /* Rx Flow Control is ON */
847 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
848 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
849
850 /* Tx Flow Control is ON */
851 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
852 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
853}
854
855/**
856 * bnx2x_link_report - report link status to OS.
857 *
858 * @bp: driver handle
859 *
860 * Calls the __bnx2x_link_report() under the same locking scheme
861 * as a link/PHY state managing code to ensure a consistent link
862 * reporting.
863 */
864
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000865void bnx2x_link_report(struct bnx2x *bp)
866{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000867 bnx2x_acquire_phy_lock(bp);
868 __bnx2x_link_report(bp);
869 bnx2x_release_phy_lock(bp);
870}
871
872/**
873 * __bnx2x_link_report - report link status to OS.
874 *
875 * @bp: driver handle
876 *
 877 * Non-atomic implementation.
878 * Should be called under the phy_lock.
879 */
880void __bnx2x_link_report(struct bnx2x *bp)
881{
882 struct bnx2x_link_report_data cur_data;
883
884 /* reread mf_cfg */
885 if (!CHIP_IS_E1(bp))
886 bnx2x_read_mf_cfg(bp);
887
888 /* Read the current link report info */
889 bnx2x_fill_report_data(bp, &cur_data);
890
891 /* Don't report link down or exactly the same link status twice */
892 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
893 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
894 &bp->last_reported_link.link_report_flags) &&
895 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
896 &cur_data.link_report_flags)))
897 return;
898
899 bp->link_cnt++;
900
 901 /* We are going to report new link parameters now -
902 * remember the current data for the next time.
903 */
904 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
905
906 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
907 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000908 netif_carrier_off(bp->dev);
909 netdev_err(bp->dev, "NIC Link is Down\n");
910 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000911 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000912 const char *duplex;
913 const char *flow;
914
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000915 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000916
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000917 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
918 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000919 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000920 else
Joe Perches94f05b02011-08-14 12:16:20 +0000921 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000922
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000923 /* Handle the FC at the end so that only these flags could
 924 * possibly be set. This way we may easily check if no FC is
 925 * enabled.
926 */
927 if (cur_data.link_report_flags) {
928 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
929 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000930 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
931 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000932 flow = "ON - receive & transmit";
933 else
934 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000935 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000936 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000937 }
Joe Perches94f05b02011-08-14 12:16:20 +0000938 } else {
939 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000940 }
Joe Perches94f05b02011-08-14 12:16:20 +0000941 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
942 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000943 }
944}
945
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000946void bnx2x_init_rx_rings(struct bnx2x *bp)
947{
948 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000949 u16 ring_prod;
950 int i, j;
951
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000952 /* Allocate TPA resources */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000953 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000954 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000955
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800956 DP(NETIF_MSG_IFUP,
957 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
958
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000959 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300960 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -0400961 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300962 struct bnx2x_agg_info *tpa_info =
963 &fp->tpa_info[i];
964 struct sw_rx_bd *first_buf =
965 &tpa_info->first_buf;
966
Eric Dumazete52fcb22011-11-14 06:05:34 +0000967 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
968 GFP_ATOMIC);
969 if (!first_buf->data) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000970 BNX2X_ERR("Failed to allocate TPA "
971 "skb pool for queue[%d] - "
972 "disabling TPA on this "
973 "queue!\n", j);
974 bnx2x_free_tpa_pool(bp, fp, i);
975 fp->disable_tpa = 1;
976 break;
977 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300978 dma_unmap_addr_set(first_buf, mapping, 0);
979 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000980 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000981
982 /* "next page" elements initialization */
983 bnx2x_set_next_page_sgl(fp);
984
985 /* set SGEs bit mask */
986 bnx2x_init_sge_ring_bit_mask(fp);
987
988 /* Allocate SGEs and initialize the ring elements */
989 for (i = 0, ring_prod = 0;
990 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
991
992 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
993 BNX2X_ERR("was only able to allocate "
994 "%d rx sges\n", i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300995 BNX2X_ERR("disabling TPA for "
996 "queue[%d]\n", j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000997 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300998 bnx2x_free_rx_sge_range(bp, fp,
999 ring_prod);
1000 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001001 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001002 fp->disable_tpa = 1;
1003 ring_prod = 0;
1004 break;
1005 }
1006 ring_prod = NEXT_SGE_IDX(ring_prod);
1007 }
1008
1009 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001010 }
1011 }
1012
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001013 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001014 struct bnx2x_fastpath *fp = &bp->fp[j];
1015
1016 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001017
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001018 /* Activate BD ring */
1019 /* Warning!
1020 * this will generate an interrupt (to the TSTORM)
1021 * must only be done after chip is initialized
1022 */
1023 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1024 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001025
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001026 if (j != 0)
1027 continue;
1028
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001029 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001030 REG_WR(bp, BAR_USTRORM_INTMEM +
1031 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1032 U64_LO(fp->rx_comp_mapping));
1033 REG_WR(bp, BAR_USTRORM_INTMEM +
1034 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1035 U64_HI(fp->rx_comp_mapping));
1036 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001037 }
1038}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001039
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001040static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1041{
1042 int i;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001043 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001044
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001045 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001046 struct bnx2x_fastpath *fp = &bp->fp[i];
Ariel Elior6383c0b2011-07-14 08:31:57 +00001047 for_each_cos_in_tx_queue(fp, cos) {
1048 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
Tom Herbert2df1a702011-11-28 16:33:37 +00001049 unsigned pkts_compl = 0, bytes_compl = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001050
Ariel Elior6383c0b2011-07-14 08:31:57 +00001051 u16 sw_prod = txdata->tx_pkt_prod;
1052 u16 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001053
Ariel Elior6383c0b2011-07-14 08:31:57 +00001054 while (sw_cons != sw_prod) {
Tom Herbert2df1a702011-11-28 16:33:37 +00001055 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1056 &pkts_compl, &bytes_compl);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001057 sw_cons++;
1058 }
Tom Herbert2df1a702011-11-28 16:33:37 +00001059 netdev_tx_reset_queue(
1060 netdev_get_tx_queue(bp->dev, txdata->txq_index));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001061 }
1062 }
1063}
1064
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001065static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1066{
1067 struct bnx2x *bp = fp->bp;
1068 int i;
1069
1070 /* ring wasn't allocated */
1071 if (fp->rx_buf_ring == NULL)
1072 return;
1073
1074 for (i = 0; i < NUM_RX_BD; i++) {
1075 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001076 u8 *data = rx_buf->data;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001077
Eric Dumazete52fcb22011-11-14 06:05:34 +00001078 if (data == NULL)
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001079 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001080 dma_unmap_single(&bp->pdev->dev,
1081 dma_unmap_addr(rx_buf, mapping),
1082 fp->rx_buf_size, DMA_FROM_DEVICE);
1083
Eric Dumazete52fcb22011-11-14 06:05:34 +00001084 rx_buf->data = NULL;
1085 kfree(data);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001086 }
1087}
1088
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001089static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1090{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001091 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001092
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001093 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001094 struct bnx2x_fastpath *fp = &bp->fp[j];
1095
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001096 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001097
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001098 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001099 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001100 }
1101}
1102
1103void bnx2x_free_skbs(struct bnx2x *bp)
1104{
1105 bnx2x_free_tx_skbs(bp);
1106 bnx2x_free_rx_skbs(bp);
1107}
1108
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001109void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1110{
1111 /* load old values */
1112 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1113
1114 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1115 /* leave all but MAX value */
1116 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1117
1118 /* set new MAX value */
1119 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1120 & FUNC_MF_CFG_MAX_BW_MASK;
1121
1122 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1123 }
1124}
1125
Dmitry Kravkovca924292011-06-14 01:33:08 +00001126/**
1127 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1128 *
1129 * @bp: driver handle
1130 * @nvecs: number of vectors to be released
1131 */
1132static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001133{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001134 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001135
Dmitry Kravkovca924292011-06-14 01:33:08 +00001136 if (nvecs == offset)
1137 return;
1138 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001139 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001140 bp->msix_table[offset].vector);
1141 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001142#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001143 if (nvecs == offset)
1144 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001145 offset++;
1146#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001147
Dmitry Kravkovca924292011-06-14 01:33:08 +00001148 for_each_eth_queue(bp, i) {
1149 if (nvecs == offset)
1150 return;
1151 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1152 "irq\n", i, bp->msix_table[offset].vector);
1153
1154 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001155 }
1156}
1157
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001158void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001159{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001160 if (bp->flags & USING_MSIX_FLAG)
Dmitry Kravkovca924292011-06-14 01:33:08 +00001161 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001162 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001163 else if (bp->flags & USING_MSI_FLAG)
1164 free_irq(bp->pdev->irq, bp->dev);
1165 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001166 free_irq(bp->pdev->irq, bp->dev);
1167}
1168
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001169int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001170{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001171 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001172
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001173 bp->msix_table[msix_vec].entry = msix_vec;
1174 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1175 bp->msix_table[0].entry);
1176 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001177
1178#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001179 bp->msix_table[msix_vec].entry = msix_vec;
1180 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1181 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1182 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001183#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001184 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001185 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001186 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001187 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001188 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1189 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001190 }
1191
Ariel Elior6383c0b2011-07-14 08:31:57 +00001192 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001193
1194 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001195
1196 /*
1197 * reconfigure number of tx/rx queues according to available
1198 * MSI-X vectors
1199 */
1200 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001201 /* how many fewer vectors will we have? */
1202 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001203
1204 DP(NETIF_MSG_IFUP,
1205 "Trying to use less MSI-X vectors: %d\n", rc);
1206
1207 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1208
1209 if (rc) {
1210 DP(NETIF_MSG_IFUP,
1211 "MSI-X is not attainable rc %d\n", rc);
1212 return rc;
1213 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001214 /*
1215 * decrease number of queues by number of unallocated entries
1216 */
1217 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001218
1219 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1220 bp->num_queues);
1221 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001222 /* fall back to INTx if not enough memory */
1223 if (rc == -ENOMEM)
1224 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001225 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1226 return rc;
1227 }
1228
1229 bp->flags |= USING_MSIX_FLAG;
1230
1231 return 0;
1232}
1233
1234static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1235{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001236 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001237
Dmitry Kravkovca924292011-06-14 01:33:08 +00001238 rc = request_irq(bp->msix_table[offset++].vector,
1239 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001240 bp->dev->name, bp->dev);
1241 if (rc) {
1242 BNX2X_ERR("request sp irq failed\n");
1243 return -EBUSY;
1244 }
1245
1246#ifdef BCM_CNIC
1247 offset++;
1248#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001249 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001250 struct bnx2x_fastpath *fp = &bp->fp[i];
1251 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1252 bp->dev->name, i);
1253
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001254 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001255 bnx2x_msix_fp_int, 0, fp->name, fp);
1256 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001257 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1258 bp->msix_table[offset].vector, rc);
1259 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001260 return -EBUSY;
1261 }
1262
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001263 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001264 }
1265
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001266 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001267 offset = 1 + CNIC_PRESENT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001268 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1269 " ... fp[%d] %d\n",
1270 bp->msix_table[0].vector,
1271 0, bp->msix_table[offset].vector,
1272 i - 1, bp->msix_table[offset + i - 1].vector);
1273
1274 return 0;
1275}
1276
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001277int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001278{
1279 int rc;
1280
1281 rc = pci_enable_msi(bp->pdev);
1282 if (rc) {
1283 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1284 return -1;
1285 }
1286 bp->flags |= USING_MSI_FLAG;
1287
1288 return 0;
1289}
1290
1291static int bnx2x_req_irq(struct bnx2x *bp)
1292{
1293 unsigned long flags;
1294 int rc;
1295
1296 if (bp->flags & USING_MSI_FLAG)
1297 flags = 0;
1298 else
1299 flags = IRQF_SHARED;
1300
1301 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1302 bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001303 return rc;
1304}
1305
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001306static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1307{
1308 int rc = 0;
1309 if (bp->flags & USING_MSIX_FLAG) {
1310 rc = bnx2x_req_msix_irqs(bp);
1311 if (rc)
1312 return rc;
1313 } else {
1314 bnx2x_ack_int(bp);
1315 rc = bnx2x_req_irq(bp);
1316 if (rc) {
1317 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1318 return rc;
1319 }
1320 if (bp->flags & USING_MSI_FLAG) {
1321 bp->dev->irq = bp->pdev->irq;
1322 netdev_info(bp->dev, "using MSI IRQ %d\n",
1323 bp->pdev->irq);
1324 }
1325 }
1326
1327 return 0;
1328}
1329
1330static inline void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001331{
1332 int i;
1333
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001334 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001335 napi_enable(&bnx2x_fp(bp, i, napi));
1336}
1337
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001338static inline void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001339{
1340 int i;
1341
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001342 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001343 napi_disable(&bnx2x_fp(bp, i, napi));
1344}
1345
1346void bnx2x_netif_start(struct bnx2x *bp)
1347{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001348 if (netif_running(bp->dev)) {
1349 bnx2x_napi_enable(bp);
1350 bnx2x_int_enable(bp);
1351 if (bp->state == BNX2X_STATE_OPEN)
1352 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001353 }
1354}
1355
1356void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1357{
1358 bnx2x_int_disable_sync(bp, disable_hw);
1359 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001360}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001361
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001362u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1363{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001364 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001365
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001366#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001367 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001368 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1369 u16 ether_type = ntohs(hdr->h_proto);
1370
1371 /* Skip VLAN tag if present */
1372 if (ether_type == ETH_P_8021Q) {
1373 struct vlan_ethhdr *vhdr =
1374 (struct vlan_ethhdr *)skb->data;
1375
1376 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1377 }
1378
1379 /* If ethertype is FCoE or FIP - use FCoE ring */
1380 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001381 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001382 }
1383#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001384 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001385 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001386}
1387
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001388void bnx2x_set_num_queues(struct bnx2x *bp)
1389{
1390 switch (bp->multi_mode) {
1391 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001392 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001393 break;
1394 case ETH_RSS_MODE_REGULAR:
1395 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001396 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001397
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001398 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001399 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001400 break;
1401 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001402
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00001403#ifdef BCM_CNIC
1404 /* override in ISCSI SD mode */
1405 if (IS_MF_ISCSI_SD(bp))
1406 bp->num_queues = 1;
1407#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001408 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001409 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001410}
1411
David S. Miller823dcd22011-08-20 10:39:12 -07001412/**
1413 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1414 *
1415 * @bp: Driver handle
1416 *
1417 * We currently support at most 16 Tx queues for each CoS, thus we will
1418 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1419 * bp->max_cos.
1420 *
1421 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1422 * index after all ETH L2 indices.
1423 *
1424 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1425 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1426 * 16..31,...) with indices that are not coupled with any real Tx queue.
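 * For example, with 4 ETH L2 queues and max_cos == 2, ETH Tx indices 0..3
 * and 16..19 are backed by real rings while 4..15 and 20..31 are holes.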
1427 *
1428 * The proper configuration of skb->queue_mapping is handled by
1429 * bnx2x_select_queue() and __skb_tx_hash().
1430 *
1431 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1432 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1433 */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001434static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1435{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001436 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001437
Ariel Elior6383c0b2011-07-14 08:31:57 +00001438 tx = MAX_TXQS_PER_COS * bp->max_cos;
1439 rx = BNX2X_NUM_ETH_QUEUES(bp);
1440
1441/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001442#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001443 if (!NO_FCOE(bp)) {
1444 rx += FCOE_PRESENT;
1445 tx += FCOE_PRESENT;
1446 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001447#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001448
1449 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1450 if (rc) {
1451 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1452 return rc;
1453 }
1454 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1455 if (rc) {
1456 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1457 return rc;
1458 }
1459
1460 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1461 tx, rx);
1462
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001463 return rc;
1464}
1465
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001466static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1467{
1468 int i;
1469
1470 for_each_queue(bp, i) {
1471 struct bnx2x_fastpath *fp = &bp->fp[i];
Eric Dumazete52fcb22011-11-14 06:05:34 +00001472 u32 mtu;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001473
1474 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1475 if (IS_FCOE_IDX(i))
1476 /*
1477 * Although there are no IP frames expected to arrive on
1478 * this ring, we still want to add an
1479 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1480 * overrun attack.
1481 */
Eric Dumazete52fcb22011-11-14 06:05:34 +00001482 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001483 else
Eric Dumazete52fcb22011-11-14 06:05:34 +00001484 mtu = bp->dev->mtu;
1485 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1486 IP_HEADER_ALIGNMENT_PADDING +
1487 ETH_OVREHEAD +
1488 mtu +
1489 BNX2X_FW_RX_ALIGN_END;
1490 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001491 }
1492}
1493
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001494static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1495{
1496 int i;
1497 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1498 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1499
1500 /*
1501 * Prepare the initial contents of the indirection table if RSS is
1502 * enabled
1503 */
1504 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
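		/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the
		 * table is spread round-robin over the ETH RX queues; adding
		 * the leading client ID turns queue indices into the client
		 * IDs that the indirection table actually holds.
		 */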
1505 for (i = 0; i < sizeof(ind_table); i++)
1506 ind_table[i] =
Ben Hutchings278bc422011-12-15 13:56:49 +00001507 bp->fp->cl_id +
1508 ethtool_rxfh_indir_default(i, num_eth_queues);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001509 }
1510
1511 /*
1512 * For 57710 and 57711, SEARCHER configuration (rss_keys) is
1513 * per-port, so if explicit configuration is needed, do it only
1514 * for a PMF.
1515 *
1516 * For 57712 and newer on the other hand it's a per-function
1517 * configuration.
1518 */
1519 return bnx2x_config_rss_pf(bp, ind_table,
1520 bp->port.pmf || !CHIP_IS_E1x(bp));
1521}
1522
1523int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1524{
1525 struct bnx2x_config_rss_params params = {0};
1526 int i;
1527
1528 /* Although RSS is meaningless when there is a single HW queue we
1529 * still need it enabled in order to have HW Rx hash generated.
1530 *
1531 * if (!is_eth_multi(bp))
1532 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1533 */
1534
1535 params.rss_obj = &bp->rss_conf_obj;
1536
1537 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1538
1539 /* RSS mode */
1540 switch (bp->multi_mode) {
1541 case ETH_RSS_MODE_DISABLED:
1542 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1543 break;
1544 case ETH_RSS_MODE_REGULAR:
1545 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1546 break;
1547 case ETH_RSS_MODE_VLAN_PRI:
1548 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1549 break;
1550 case ETH_RSS_MODE_E1HOV_PRI:
1551 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1552 break;
1553 case ETH_RSS_MODE_IP_DSCP:
1554 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1555 break;
1556 default:
1557 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1558 return -EINVAL;
1559 }
1560
1561 /* If RSS is enabled */
1562 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1563 /* RSS configuration */
1564 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1565 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1566 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1567 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1568
1569 /* Hash bits */
1570 params.rss_result_mask = MULTI_MASK;
1571
1572 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1573
1574 if (config_hash) {
1575 /* RSS keys */
1576 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1577 params.rss_key[i] = random32();
1578
1579 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1580 }
1581 }
1582
1583 return bnx2x_config_rss(bp, &params);
1584}
1585
1586static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1587{
1588 struct bnx2x_func_state_params func_params = {0};
1589
1590 /* Prepare parameters for function state transitions */
1591 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1592
1593 func_params.f_obj = &bp->func_obj;
1594 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1595
1596 func_params.params.hw_init.load_phase = load_code;
1597
1598 return bnx2x_func_state_change(bp, &func_params);
1599}
1600
1601/*
1602 * Cleans the objects that have internal lists without sending
1603 * ramrods. Should be run when interrupts are disabled.
1604 */
1605static void bnx2x_squeeze_objects(struct bnx2x *bp)
1606{
1607 int rc;
1608 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1609 struct bnx2x_mcast_ramrod_params rparam = {0};
1610 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1611
1612 /***************** Cleanup MACs' object first *************************/
1613
1614 /* Wait for completion of requested */
1615 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1616 /* Perform a dry cleanup */
1617 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1618
1619 /* Clean ETH primary MAC */
1620 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1621 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1622 &ramrod_flags);
1623 if (rc != 0)
1624 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1625
1626 /* Cleanup UC list */
1627 vlan_mac_flags = 0;
1628 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1629 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1630 &ramrod_flags);
1631 if (rc != 0)
1632 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1633
1634 /***************** Now clean mcast object *****************************/
1635 rparam.mcast_obj = &bp->mcast_obj;
1636 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1637
1638 /* Add a DEL command... */
1639 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1640 if (rc < 0)
1641 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1642 "object: %d\n", rc);
1643
1644 /* ...and wait until all pending commands are cleared */
1645 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1646 while (rc != 0) {
1647 if (rc < 0) {
1648 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1649 rc);
1650 return;
1651 }
1652
1653 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1654 }
1655}
1656
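/* Error unwind helper for bnx2x_nic_load(): mark the device as errored and
 * jump to the given cleanup label. In BNX2X_STOP_ON_ERROR debug builds, set
 * the panic flag and bail out immediately instead of unwinding.
 */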
1657#ifndef BNX2X_STOP_ON_ERROR
1658#define LOAD_ERROR_EXIT(bp, label) \
1659 do { \
1660 (bp)->state = BNX2X_STATE_ERROR; \
1661 goto label; \
1662 } while (0)
1663#else
1664#define LOAD_ERROR_EXIT(bp, label) \
1665 do { \
1666 (bp)->state = BNX2X_STATE_ERROR; \
1667 (bp)->panic = 1; \
1668 return -EBUSY; \
1669 } while (0)
1670#endif
1671
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001672/* must be called with rtnl_lock */
1673int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1674{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001675 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001676 u32 load_code;
1677 int i, rc;
1678
1679#ifdef BNX2X_STOP_ON_ERROR
1680 if (unlikely(bp->panic))
1681 return -EPERM;
1682#endif
1683
1684 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1685
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001686 /* Set the initial link reported state to link down */
1687 bnx2x_acquire_phy_lock(bp);
1688 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1689 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1690 &bp->last_reported_link.link_report_flags);
1691 bnx2x_release_phy_lock(bp);
1692
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001693 /* must be called before memory allocation and HW init */
1694 bnx2x_ilt_set_info(bp);
1695
Ariel Elior6383c0b2011-07-14 08:31:57 +00001696 /*
1697 * Zero fastpath structures while preserving invariants that are set up
1698 * only once: the napi struct, fp index, max_cos and the bp pointer.
1699 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001700 */
1701 for_each_queue(bp, i)
1702 bnx2x_bz_fp(bp, i);
1703
Ariel Elior6383c0b2011-07-14 08:31:57 +00001704
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001705 /* Set the receive queues buffer size */
1706 bnx2x_set_rx_buf_size(bp);
1707
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001708 if (bnx2x_alloc_mem(bp))
1709 return -ENOMEM;
1710
1711 /* Since bnx2x_alloc_mem() may update
1712 * bp->num_queues, bnx2x_set_real_num_queues() should always
1713 * come after it.
1714 */
1715 rc = bnx2x_set_real_num_queues(bp);
1716 if (rc) {
1717 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001718 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001719 }
1720
Ariel Elior6383c0b2011-07-14 08:31:57 +00001721 /* configure multi cos mappings in kernel.
1722 * this configuration may be overridden by a multi class queue discipline
1723 * or by a dcbx negotiation result.
1724 */
1725 bnx2x_setup_tc(bp->dev, bp->max_cos);
1726
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001727 bnx2x_napi_enable(bp);
1728
Ariel Elior889b9af2012-01-26 06:01:51 +00001729 /* set pf load just before approaching the MCP */
1730 bnx2x_set_pf_load(bp);
1731
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001732 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001733 * Returns the type of LOAD command:
1734 * if this is the first port to be initialized,
1735 * common blocks should be initialized, otherwise - not
1736 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001737 if (!BP_NOMCP(bp)) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00001738 /* init fw_seq */
1739 bp->fw_seq =
1740 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1741 DRV_MSG_SEQ_NUMBER_MASK);
1742 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1743
1744 /* Get current FW pulse sequence */
1745 bp->fw_drv_pulse_wr_seq =
1746 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1747 DRV_PULSE_SEQ_MASK);
1748 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1749
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001750 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001751 if (!load_code) {
1752 BNX2X_ERR("MCP response failure, aborting\n");
1753 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001754 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001755 }
1756 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1757 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001758 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001759 }
Ariel Eliord1e2d962012-01-26 06:01:49 +00001760 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1761 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1762 /* build FW version dword */
1763 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1764 (BCM_5710_FW_MINOR_VERSION << 8) +
1765 (BCM_5710_FW_REVISION_VERSION << 16) +
1766 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1767
1768 /* read loaded FW from chip */
1769 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1770
1771 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
1772 loaded_fw, my_fw);
1773
1774 /* abort nic load if version mismatch */
1775 if (my_fw != loaded_fw) {
1776 BNX2X_ERR("bnx2x with FW %x already loaded, "
1777 "which mismatches my %x FW. aborting",
1778 loaded_fw, my_fw);
1779 rc = -EBUSY;
1780 LOAD_ERROR_EXIT(bp, load_error2);
1781 }
1782 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001783
1784 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001785 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001786
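		/* No MCP: emulate its load arbitration with per-path counters.
		 * The first function to load does COMMON init, the first one
		 * on a port does PORT init, everyone else does FUNCTION-only
		 * init.
		 */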
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001787 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1788 path, load_count[path][0], load_count[path][1],
1789 load_count[path][2]);
1790 load_count[path][0]++;
1791 load_count[path][1 + port]++;
1792 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1793 path, load_count[path][0], load_count[path][1],
1794 load_count[path][2]);
1795 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001796 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001797 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001798 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1799 else
1800 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1801 }
1802
1803 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001804 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001805 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001806 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001807 /*
1808 * We need the barrier to ensure the ordering between the
1809 * writing to bp->port.pmf here and reading it from the
1810 * bnx2x_periodic_task().
1811 */
1812 smp_mb();
1813 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1814 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001815 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001816
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001817 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1818
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001819 /* Init Function state controlling object */
1820 bnx2x__init_func_obj(bp);
1821
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001822 /* Initialize HW */
1823 rc = bnx2x_init_hw(bp, load_code);
1824 if (rc) {
1825 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001826 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001827 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001828 }
1829
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001830 /* Connect to IRQs */
1831 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001832 if (rc) {
1833 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001834 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001835 }
1836
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001837 /* Setup NIC internals and enable interrupts */
1838 bnx2x_nic_init(bp, load_code);
1839
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001840 /* Init per-function objects */
1841 bnx2x_init_bp_objs(bp);
1842
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001843 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1844 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001845 (bp->common.shmem2_base)) {
1846 if (SHMEM2_HAS(bp, dcc_support))
1847 SHMEM2_WR(bp, dcc_support,
1848 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1849 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1850 }
1851
1852 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1853 rc = bnx2x_func_start(bp);
1854 if (rc) {
1855 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00001856 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001857 LOAD_ERROR_EXIT(bp, load_error3);
1858 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001859
1860 /* Send LOAD_DONE command to MCP */
1861 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001862 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001863 if (!load_code) {
1864 BNX2X_ERR("MCP response failure, aborting\n");
1865 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001866 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001867 }
1868 }
1869
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001870 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001871 if (rc) {
1872 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001873 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001874 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001875
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001876#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001877 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001878 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001879#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001880
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001881 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001882 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001883 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001884 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001885 }
1886
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001887 rc = bnx2x_init_rss_pf(bp);
1888 if (rc)
1889 LOAD_ERROR_EXIT(bp, load_error4);
1890
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001891 /* Now that Clients are configured we are ready to work */
1892 bp->state = BNX2X_STATE_OPEN;
1893
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001894 /* Configure a ucast MAC */
1895 rc = bnx2x_set_eth_mac(bp, true);
1896 if (rc)
1897 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001898
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001899 if (bp->pending_max) {
1900 bnx2x_update_max_mf_config(bp, bp->pending_max);
1901 bp->pending_max = 0;
1902 }
1903
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001904 if (bp->port.pmf)
1905 bnx2x_initial_phy_init(bp, load_mode);
1906
1907 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001908
1909 /* Initialize Rx filter. */
1910 netif_addr_lock_bh(bp->dev);
1911 bnx2x_set_rx_mode(bp->dev);
1912 netif_addr_unlock_bh(bp->dev);
1913
1914 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001915 switch (load_mode) {
1916 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001917 /* Tx queues should only be re-enabled */
1918 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001919 break;
1920
1921 case LOAD_OPEN:
1922 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001923 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001924 break;
1925
1926 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001927 bp->state = BNX2X_STATE_DIAG;
1928 break;
1929
1930 default:
1931 break;
1932 }
1933
Dmitry Kravkov00253a82011-11-13 04:34:25 +00001934 if (bp->port.pmf)
1935 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
1936 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001937 bnx2x__link_status_update(bp);
1938
1939 /* start the timer */
1940 mod_timer(&bp->timer, jiffies + bp->current_interval);
1941
1942#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00001943 /* re-read iscsi info */
1944 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001945 bnx2x_setup_cnic_irq_info(bp);
1946 if (bp->state == BNX2X_STATE_OPEN)
1947 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1948#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001949
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001950 /* Wait for all pending SP commands to complete */
1951 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1952 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1953 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1954 return -EBUSY;
1955 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001956
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001957 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001958 return 0;
1959
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001960#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001961load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001962#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001963 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001964 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001965#endif
1966load_error3:
1967 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001968
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001969 /* Clean queueable objects */
1970 bnx2x_squeeze_objects(bp);
1971
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001972 /* Free SKBs, SGEs, TPA pool and driver internals */
1973 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001974 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001975 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001976
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001977 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001978 bnx2x_free_irq(bp);
1979load_error2:
1980 if (!BP_NOMCP(bp)) {
1981 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1982 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1983 }
1984
1985 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001986load_error1:
1987 bnx2x_napi_disable(bp);
Ariel Elior889b9af2012-01-26 06:01:51 +00001988 /* clear pf_load status, as it was already set */
1989 bnx2x_clear_pf_load(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001990load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001991 bnx2x_free_mem(bp);
1992
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001993 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001994#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001995}
1996
1997/* must be called with rtnl_lock */
1998int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1999{
2000 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002001 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002002
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002003 if ((bp->state == BNX2X_STATE_CLOSED) ||
2004 (bp->state == BNX2X_STATE_ERROR)) {
2005 /* We can get here if the driver has been unloaded
2006 * during parity error recovery and is either waiting for a
2007 * leader to complete or for other functions to unload and
2008 * then ifdown has been issued. In this case we want to
2009 * unload and let other functions complete a recovery
2010 * process.
2011 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002012 bp->recovery_state = BNX2X_RECOVERY_DONE;
2013 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002014 bnx2x_release_leader_lock(bp);
2015 smp_mb();
2016
2017 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002018
2019 return -EINVAL;
2020 }
2021
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002022 /*
2023 * It's important to set bp->state to a value different from
2024 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2025 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2026 */
2027 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2028 smp_mb();
2029
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002030 /* Stop Tx */
2031 bnx2x_tx_disable(bp);
2032
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002033#ifdef BCM_CNIC
2034 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2035#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002036
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002037 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002038
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002039 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002040
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002041 /* Set ALWAYS_ALIVE bit in shmem */
2042 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2043
2044 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002045
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002046 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Mintz Yuval1355b702012-02-15 02:10:22 +00002047 bnx2x_save_statistics(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002048
2049 /* Cleanup the chip if needed */
2050 if (unload_mode != UNLOAD_RECOVERY)
2051 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002052 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002053 /* Send the UNLOAD_REQUEST to the MCP */
2054 bnx2x_send_unload_req(bp, unload_mode);
2055
2056 /*
2057 * Prevent transactions to host from the functions on the
2058 * engine that doesn't reset global blocks in case of global
2059 * attention once global blocks are reset and gates are opened
2060 * (the engine whose leader will perform the recovery
2061 * last).
2062 */
2063 if (!CHIP_IS_E1x(bp))
2064 bnx2x_pf_disable(bp);
2065
2066 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002067 bnx2x_netif_stop(bp, 1);
2068
2069 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002070 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002071
2072 /* Report UNLOAD_DONE to MCP */
2073 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002074 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002075
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002076 /*
2077 * At this stage no more interrupts will arrive, so we may safely clean
2078 * the queueable objects here in case they failed to get cleaned so far.
2079 */
2080 bnx2x_squeeze_objects(bp);
2081
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002082 /* There should be no more pending SP commands at this stage */
2083 bp->sp_state = 0;
2084
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002085 bp->port.pmf = 0;
2086
2087 /* Free SKBs, SGEs, TPA pool and driver internals */
2088 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002089 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002090 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002091
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002092 bnx2x_free_mem(bp);
2093
2094 bp->state = BNX2X_STATE_CLOSED;
2095
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002096 /* Check if there are pending parity attentions. If there are - set
2097 * RECOVERY_IN_PROGRESS.
2098 */
2099 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2100 bnx2x_set_reset_in_progress(bp);
2101
2102 /* Set RESET_IS_GLOBAL if needed */
2103 if (global)
2104 bnx2x_set_reset_global(bp);
2105 }
2106
2107
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002108 /* The last driver must disable a "close the gate" if there is no
2109 * parity attention or "process kill" pending.
2110 */
Ariel Elior889b9af2012-01-26 06:01:51 +00002111 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002112 bnx2x_disable_close_the_gate(bp);
2113
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002114 return 0;
2115}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002116
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002117int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2118{
2119 u16 pmcsr;
2120
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002121 /* If there is no power capability, silently succeed */
2122 if (!bp->pm_cap) {
2123 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2124 return 0;
2125 }
2126
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002127 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2128
2129 switch (state) {
2130 case PCI_D0:
2131 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2132 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2133 PCI_PM_CTRL_PME_STATUS));
2134
2135 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2136 /* delay required during transition out of D3hot */
2137 msleep(20);
2138 break;
2139
2140 case PCI_D3hot:
2141 /* If there are other clients above don't
2142 shut down the power */
2143 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2144 return 0;
2145 /* Don't shut down the power for emulation and FPGA */
2146 if (CHIP_REV_IS_SLOW(bp))
2147 return 0;
2148
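		/* Request the D3hot power state (field value 3) in the PM
		 * control register.
		 */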
2149 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2150 pmcsr |= 3;
2151
2152 if (bp->wol)
2153 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2154
2155 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2156 pmcsr);
2157
2158 /* No more memory access after this point until
2159 * device is brought back to D0.
2160 */
2161 break;
2162
2163 default:
2164 return -EINVAL;
2165 }
2166 return 0;
2167}
2168
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002169/*
2170 * net_device service functions
2171 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002172int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002173{
2174 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002175 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002176 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2177 napi);
2178 struct bnx2x *bp = fp->bp;
2179
2180 while (1) {
2181#ifdef BNX2X_STOP_ON_ERROR
2182 if (unlikely(bp->panic)) {
2183 napi_complete(napi);
2184 return 0;
2185 }
2186#endif
2187
Ariel Elior6383c0b2011-07-14 08:31:57 +00002188 for_each_cos_in_tx_queue(fp, cos)
2189 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2190 bnx2x_tx_int(bp, &fp->txdata[cos]);
2191
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002192
2193 if (bnx2x_has_rx_work(fp)) {
2194 work_done += bnx2x_rx_int(fp, budget - work_done);
2195
2196 /* must not complete if we consumed full budget */
2197 if (work_done >= budget)
2198 break;
2199 }
2200
2201 /* Fall out from the NAPI loop if needed */
2202 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002203#ifdef BCM_CNIC
2204 /* No need to update SB for FCoE L2 ring as long as
2205 * it's connected to the default SB and the SB
2206 * has been updated when NAPI was scheduled.
2207 */
2208 if (IS_FCOE_FP(fp)) {
2209 napi_complete(napi);
2210 break;
2211 }
2212#endif
2213
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002214 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002215 /* bnx2x_has_rx_work() reads the status block,
2216 * thus we need to ensure that status block indices
2217 * have been actually read (bnx2x_update_fpsb_idx)
2218 * prior to this check (bnx2x_has_rx_work) so that
2219 * we won't write the "newer" value of the status block
2220 * to IGU (if there was a DMA right after
2221 * bnx2x_has_rx_work and if there is no rmb, the memory
2222 * reading (bnx2x_update_fpsb_idx) may be postponed
2223 * to right before bnx2x_ack_sb). In this case there
2224 * will never be another interrupt until there is
2225 * another update of the status block, while there
2226 * is still unhandled work.
2227 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002228 rmb();
2229
2230 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2231 napi_complete(napi);
2232 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002233 DP(NETIF_MSG_HW,
2234 "Update index to %d\n", fp->fp_hc_idx);
2235 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2236 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002237 IGU_INT_ENABLE, 1);
2238 break;
2239 }
2240 }
2241 }
2242
2243 return work_done;
2244}
2245
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002246/* We split the first BD into headers and data BDs
2247 * to ease the pain of our fellow microcode engineers;
2248 * we use one mapping for both BDs.
2249 * So far this has only been observed to happen
2250 * in Other Operating Systems(TM).
2251 */
2252static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002253 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002254 struct sw_tx_bd *tx_buf,
2255 struct eth_tx_start_bd **tx_bd, u16 hlen,
2256 u16 bd_prod, int nbd)
2257{
2258 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2259 struct eth_tx_bd *d_tx_bd;
2260 dma_addr_t mapping;
2261 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2262
2263 /* first fix first BD */
2264 h_tx_bd->nbd = cpu_to_le16(nbd);
2265 h_tx_bd->nbytes = cpu_to_le16(hlen);
2266
2267 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2268 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2269 h_tx_bd->addr_lo, h_tx_bd->nbd);
2270
2271 /* now get a new data BD
2272 * (after the pbd) and fill it */
2273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002274 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002275
2276 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2277 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2278
2279 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2280 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2281 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2282
2283 /* this marks the BD as one that has no individual mapping */
2284 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2285
2286 DP(NETIF_MSG_TX_QUEUED,
2287 "TSO split data size is %d (%x:%x)\n",
2288 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2289
2290 /* update tx_bd */
2291 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2292
2293 return bd_prod;
2294}
2295
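/* Adjust a pseudo checksum that was computed starting 'fix' bytes away from
 * the transport header: remove (fix > 0) or add back (fix < 0) the partial
 * checksum of those bytes, then byte-swap into the order the parsing BD
 * expects.
 */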
2296static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2297{
2298 if (fix > 0)
2299 csum = (u16) ~csum_fold(csum_sub(csum,
2300 csum_partial(t_header - fix, fix, 0)));
2301
2302 else if (fix < 0)
2303 csum = (u16) ~csum_fold(csum_add(csum,
2304 csum_partial(t_header, -fix, 0)));
2305
2306 return swab16(csum);
2307}
2308
2309static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2310{
2311 u32 rc;
2312
2313 if (skb->ip_summed != CHECKSUM_PARTIAL)
2314 rc = XMIT_PLAIN;
2315
2316 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002317 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002318 rc = XMIT_CSUM_V6;
2319 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2320 rc |= XMIT_CSUM_TCP;
2321
2322 } else {
2323 rc = XMIT_CSUM_V4;
2324 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2325 rc |= XMIT_CSUM_TCP;
2326 }
2327 }
2328
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002329 if (skb_is_gso_v6(skb))
2330 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2331 else if (skb_is_gso(skb))
2332 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002333
2334 return rc;
2335}
2336
2337#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2338/* check if packet requires linearization (packet is too fragmented)
2339 no need to check fragmentation if page size > 8K (there will be no
2340 violation of FW restrictions) */
2341static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2342 u32 xmit_type)
2343{
2344 int to_copy = 0;
2345 int hlen = 0;
2346 int first_bd_sz = 0;
2347
2348 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2349 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2350
2351 if (xmit_type & XMIT_GSO) {
2352 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2353 /* Check if LSO packet needs to be copied:
2354 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2355 int wnd_size = MAX_FETCH_BD - 3;
2356 /* Number of windows to check */
2357 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2358 int wnd_idx = 0;
2359 int frag_idx = 0;
2360 u32 wnd_sum = 0;
2361
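			/* FW restriction: every window of wnd_size consecutive
			 * data BDs must carry at least one MSS worth of
			 * payload, otherwise the skb has to be linearized.
			 */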
2362 /* Headers length */
2363 hlen = (int)(skb_transport_header(skb) - skb->data) +
2364 tcp_hdrlen(skb);
2365
2366 /* Amount of data (w/o headers) on linear part of SKB */
2367 first_bd_sz = skb_headlen(skb) - hlen;
2368
2369 wnd_sum = first_bd_sz;
2370
2371 /* Calculate the first sum - it's special */
2372 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2373 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002374 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002375
2376 /* If there was data in the linear part of the skb - check it */
2377 if (first_bd_sz > 0) {
2378 if (unlikely(wnd_sum < lso_mss)) {
2379 to_copy = 1;
2380 goto exit_lbl;
2381 }
2382
2383 wnd_sum -= first_bd_sz;
2384 }
2385
2386 /* Others are easier: run through the frag list and
2387 check all windows */
2388 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2389 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002390 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002391
2392 if (unlikely(wnd_sum < lso_mss)) {
2393 to_copy = 1;
2394 break;
2395 }
2396 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002397 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002398 }
2399 } else {
2400 /* in the non-LSO case, a too-fragmented packet should always
2401 be linearized */
2402 to_copy = 1;
2403 }
2404 }
2405
2406exit_lbl:
2407 if (unlikely(to_copy))
2408 DP(NETIF_MSG_TX_QUEUED,
2409 "Linearization IS REQUIRED for %s packet. "
2410 "num_frags %d hlen %d first_bd_sz %d\n",
2411 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2412 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2413
2414 return to_copy;
2415}
2416#endif
2417
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002418static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2419 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002420{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002421 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2422 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2423 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002424 if ((xmit_type & XMIT_GSO_V6) &&
2425 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002426 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002427}
2428
2429/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002430 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002431 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002432 * @skb: packet skb
2433 * @pbd: parse BD
2434 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002435 */
2436static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2437 struct eth_tx_parse_bd_e1x *pbd,
2438 u32 xmit_type)
2439{
2440 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2441 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2442 pbd->tcp_flags = pbd_tcp_flags(skb);
2443
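	/* The pseudo-header checksum is seeded with a zero length, as is
	 * usual for LSO, since the length differs per generated segment.
	 */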
2444 if (xmit_type & XMIT_GSO_V4) {
2445 pbd->ip_id = swab16(ip_hdr(skb)->id);
2446 pbd->tcp_pseudo_csum =
2447 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2448 ip_hdr(skb)->daddr,
2449 0, IPPROTO_TCP, 0));
2450
2451 } else
2452 pbd->tcp_pseudo_csum =
2453 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2454 &ipv6_hdr(skb)->daddr,
2455 0, IPPROTO_TCP, 0));
2456
2457 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2458}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002459
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002460/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002461 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002462 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002463 * @bp: driver handle
2464 * @skb: packet skb
2465 * @parsing_data: data to be updated
2466 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002467 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002468 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002469 */
2470static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002471 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002472{
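	/* The E2 parsing data encodes the L4 header start offset in 16-bit
	 * words (hence the >> 1) and the TCP header length in 32-bit dwords
	 * (hence the / 4), as the _W and _DW field suffixes indicate.
	 */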
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002473 *parsing_data |=
2474 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2475 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2476 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002477
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002478 if (xmit_type & XMIT_CSUM_TCP) {
2479 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2480 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2481 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002482
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002483 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2484 } else
2485 /* We support checksum offload for TCP and UDP only.
2486 * No need to pass the UDP header length - it's a constant.
2487 */
2488 return skb_transport_header(skb) +
2489 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002490}
2491
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002492static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2493 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2494{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002495 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2496
2497 if (xmit_type & XMIT_CSUM_V4)
2498 tx_start_bd->bd_flags.as_bitfield |=
2499 ETH_TX_BD_FLAGS_IP_CSUM;
2500 else
2501 tx_start_bd->bd_flags.as_bitfield |=
2502 ETH_TX_BD_FLAGS_IPV6;
2503
2504 if (!(xmit_type & XMIT_CSUM_TCP))
2505 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002506}
2507
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002508/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002509 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002510 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002511 * @bp: driver handle
2512 * @skb: packet skb
2513 * @pbd: parse BD to be updated
2514 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002515 */
2516static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2517 struct eth_tx_parse_bd_e1x *pbd,
2518 u32 xmit_type)
2519{
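	/* The E1x parsing BD expresses header offsets and lengths in 16-bit
	 * words, hence the >> 1 and / 2 conversions below.
	 */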
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002520 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002521
2522 /* for now NS flag is not used in Linux */
2523 pbd->global_data =
2524 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2525 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2526
2527 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002528 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002529
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002530 hlen += pbd->ip_hlen_w;
2531
2532 /* We support checksum offload for TCP and UDP only */
2533 if (xmit_type & XMIT_CSUM_TCP)
2534 hlen += tcp_hdrlen(skb) / 2;
2535 else
2536 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002537
2538 pbd->total_hlen_w = cpu_to_le16(hlen);
2539 hlen = hlen*2;
2540
2541 if (xmit_type & XMIT_CSUM_TCP) {
2542 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2543
2544 } else {
2545 s8 fix = SKB_CS_OFF(skb); /* signed! */
2546
2547 DP(NETIF_MSG_TX_QUEUED,
2548 "hlen %d fix %d csum before fix %x\n",
2549 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2550
2551 /* HW bug: fixup the CSUM */
2552 pbd->tcp_pseudo_csum =
2553 bnx2x_csum_fix(skb_transport_header(skb),
2554 SKB_CS(skb), fix);
2555
2556 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2557 pbd->tcp_pseudo_csum);
2558 }
2559
2560 return hlen;
2561}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002562
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002563/* called with netif_tx_lock
2564 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2565 * netif_wake_queue()
2566 */
2567netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2568{
2569 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002570
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002571 struct bnx2x_fastpath *fp;
2572 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002573 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002574 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002575 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002576 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002577 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002578 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002579 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002580 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002581 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002582 dma_addr_t mapping;
2583 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2584 int i;
2585 u8 hlen = 0;
2586 __le16 pkt_size = 0;
2587 struct ethhdr *eth;
2588 u8 mac_type = UNICAST_ADDRESS;
2589
2590#ifdef BNX2X_STOP_ON_ERROR
2591 if (unlikely(bp->panic))
2592 return NETDEV_TX_BUSY;
2593#endif
2594
Ariel Elior6383c0b2011-07-14 08:31:57 +00002595 txq_index = skb_get_queue_mapping(skb);
2596 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002597
Ariel Elior6383c0b2011-07-14 08:31:57 +00002598 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2599
2600 /* decode the fastpath index and the cos index from the txq */
2601 fp_index = TXQ_TO_FP(txq_index);
2602 txdata_index = TXQ_TO_COS(txq_index);
2603
2604#ifdef BCM_CNIC
2605 /*
2606 * Override the above for the FCoE queue:
2607 * - FCoE fp entry is right after the ETH entries.
2608 * - FCoE L2 queue uses bp->txdata[0] only.
2609 */
2610 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2611 bnx2x_fcoe_tx(bp, txq_index)))) {
2612 fp_index = FCOE_IDX;
2613 txdata_index = 0;
2614 }
2615#endif
2616
2617 /* enable this debug print to view the transmission queue being used
Joe Perches94f05b02011-08-14 12:16:20 +00002618 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002619 txq_index, fp_index, txdata_index); */
2620
2621 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002622 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002623 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002624
Ariel Elior6383c0b2011-07-14 08:31:57 +00002625 /* enable this debug print to view the transmission details
2626 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
Joe Perches94f05b02011-08-14 12:16:20 +00002627 " tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002628 txdata->cid, fp_index, txdata_index, txdata, fp); */
2629
2630 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2631 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002632 fp->eth_q_stats.driver_xoff++;
2633 netif_tx_stop_queue(txq);
2634 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2635 return NETDEV_TX_BUSY;
2636 }
2637
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002638 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2639 "protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002640 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002641 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2642
2643 eth = (struct ethhdr *)skb->data;
2644
2645 /* set flag according to packet type (UNICAST_ADDRESS is default) */
2646 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2647 if (is_broadcast_ether_addr(eth->h_dest))
2648 mac_type = BROADCAST_ADDRESS;
2649 else
2650 mac_type = MULTICAST_ADDRESS;
2651 }
2652
2653#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2654 /* First, check if we need to linearize the skb (due to FW
2655 restrictions). No need to check fragmentation if page size > 8K
2656 (there will be no violation of FW restrictions) */
2657 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2658 /* Statistics of linearization */
2659 bp->lin_cnt++;
2660 if (skb_linearize(skb) != 0) {
2661 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2662 "silently dropping this SKB\n");
2663 dev_kfree_skb_any(skb);
2664 return NETDEV_TX_OK;
2665 }
2666 }
2667#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002668 /* Map skb linear data for DMA */
2669 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2670 skb_headlen(skb), DMA_TO_DEVICE);
2671 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2672 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2673 "silently dropping this SKB\n");
2674 dev_kfree_skb_any(skb);
2675 return NETDEV_TX_OK;
2676 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002677 /*
2678 Please read carefully. First we use one BD which we mark as start,
2679 then we have a parsing info BD (used for TSO or xsum),
2680 and only then we have the rest of the TSO BDs.
2681 (don't forget to mark the last one as last,
2682 and to unmap only AFTER you write to the BD ...)
2683 And above all, all pdb sizes are in words - NOT DWORDS!
2684 */
2685
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002686 /* get current pkt produced now - advance it just before sending packet
2687 * since mapping of pages may fail and cause packet to be dropped
2688 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002689 pkt_prod = txdata->tx_pkt_prod;
2690 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002691
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002692 /* get a tx_buf and first BD
2693 * tx_start_bd may be changed during SPLIT,
2694 * but first_bd will always stay first
2695 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002696 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2697 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002698 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002699
2700 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002701 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2702 mac_type);
2703
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002704 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002705 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002706
2707 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002708 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002709 tx_buf->skb = skb;
2710 tx_buf->flags = 0;
2711
2712 DP(NETIF_MSG_TX_QUEUED,
2713 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002714 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002715
Jesse Grosseab6d182010-10-20 13:56:03 +00002716 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002717 tx_start_bd->vlan_or_ethertype =
2718 cpu_to_le16(vlan_tx_tag_get(skb));
2719 tx_start_bd->bd_flags.as_bitfield |=
2720 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002721 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002722 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002723
2724 /* turn on parsing and get a BD */
2725 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002726
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002727 if (xmit_type & XMIT_CSUM)
2728 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002729
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002730 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002731 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002732 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2733 /* Set PBD in checksum offload case */
2734 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002735 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2736 &pbd_e2_parsing_data,
2737 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002738 if (IS_MF_SI(bp)) {
2739 /*
2740 * fill in the MAC addresses in the PBD - for local
2741 * switching
2742 */
2743 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2744 &pbd_e2->src_mac_addr_mid,
2745 &pbd_e2->src_mac_addr_lo,
2746 eth->h_source);
2747 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2748 &pbd_e2->dst_mac_addr_mid,
2749 &pbd_e2->dst_mac_addr_lo,
2750 eth->h_dest);
2751 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002752 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002753 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002754 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2755 /* Set PBD in checksum offload case */
2756 if (xmit_type & XMIT_CSUM)
2757 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002758
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002759 }
2760
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002761 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002762 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2763 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002764 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002765 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2766 pkt_size = tx_start_bd->nbytes;
2767
2768 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2769 " nbytes %d flags %x vlan %x\n",
2770 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2771 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002772 tx_start_bd->bd_flags.as_bitfield,
2773 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002774
2775 if (xmit_type & XMIT_GSO) {
2776
2777 DP(NETIF_MSG_TX_QUEUED,
2778 "TSO packet len %d hlen %d total len %d tso size %d\n",
2779 skb->len, hlen, skb_headlen(skb),
2780 skb_shinfo(skb)->gso_size);
2781
2782 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2783
2784 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00002785 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2786 &tx_start_bd, hlen,
2787 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002788 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002789 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2790 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002791 else
2792 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002793 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002794
2795 /* Set the PBD's parsing_data field if not zero
2796 * (for the chips newer than 57711).
2797 */
2798 if (pbd_e2_parsing_data)
2799 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2800
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002801 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2802
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002803 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002804 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2805 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2806
Eric Dumazet9e903e02011-10-18 21:00:24 +00002807 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2808 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002809 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Tom Herbert2df1a702011-11-28 16:33:37 +00002810 unsigned int pkts_compl = 0, bytes_compl = 0;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002811
2812 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2813 "dropping packet...\n");
2814
 2815			/* we need to unmap all buffers already mapped
 2816			 * for this SKB;
 2817			 * first_bd->nbd needs to be properly updated
 2818			 * before the call to bnx2x_free_tx_pkt
2819 */
2820 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002821 bnx2x_free_tx_pkt(bp, txdata,
Tom Herbert2df1a702011-11-28 16:33:37 +00002822 TX_BD(txdata->tx_pkt_prod),
2823 &pkts_compl, &bytes_compl);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002824 return NETDEV_TX_OK;
2825 }
2826
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002827 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002828 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002829 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00002830 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002831
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002832 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2833 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00002834 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2835 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002836 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002837
2838 DP(NETIF_MSG_TX_QUEUED,
2839 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2840 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2841 le16_to_cpu(tx_data_bd->nbytes));
2842 }
2843
2844 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2845
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002846 /* update with actual num BDs */
2847 first_bd->nbd = cpu_to_le16(nbd);
2848
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002849 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2850
2851 /* now send a tx doorbell, counting the next BD
2852 * if the packet contains or ends with it
2853 */
2854 if (TX_BD_POFF(bd_prod) < nbd)
2855 nbd++;
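	/* The increment above accounts for the "next page" BD: assuming the
	 * usual chained-page TX ring layout, a packet whose BDs wrap past
	 * the end of a ring page also consumes the link BD in between, and
	 * FW expects it to be included in nbd.
	 */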
2856
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002857 /* total_pkt_bytes should be set on the first data BD if
2858 * it's not an LSO packet and there is more than one
2859 * data BD. In this case pkt_size is limited by an MTU value.
2860 * However we prefer to set it for an LSO packet (while we don't
 2861	 * have to) in order to save some CPU cycles in a non-LSO
 2862	 * case, where we care much more about them.
2863 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864 if (total_pkt_bd != NULL)
2865 total_pkt_bd->total_pkt_bytes = pkt_size;
2866
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002867 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002868 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002869 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002870 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002871 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2872 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2873 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2874 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002875 if (pbd_e2)
2876 DP(NETIF_MSG_TX_QUEUED,
2877 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2878 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2879 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2880 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2881 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002882 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2883
Tom Herbert2df1a702011-11-28 16:33:37 +00002884 netdev_tx_sent_queue(txq, skb->len);
2885
Ariel Elior6383c0b2011-07-14 08:31:57 +00002886 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002887 /*
2888 * Make sure that the BD data is updated before updating the producer
2889 * since FW might read the BD right after the producer is updated.
2890 * This is only applicable for weak-ordered memory model archs such
2891 * as IA-64. The following barrier is also mandatory since FW will
 2892	 * assume packets must have BDs.
2893 */
2894 wmb();
2895
Ariel Elior6383c0b2011-07-14 08:31:57 +00002896 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002897 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002898
Ariel Elior6383c0b2011-07-14 08:31:57 +00002899 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002900
2901 mmiowb();
2902
Ariel Elior6383c0b2011-07-14 08:31:57 +00002903 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002904
Ariel Elior6383c0b2011-07-14 08:31:57 +00002905 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002906 netif_tx_stop_queue(txq);
2907
2908 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2909 * ordering of set_bit() in netif_tx_stop_queue() and read of
2910 * fp->bd_tx_cons */
2911 smp_mb();
2912
2913 fp->eth_q_stats.driver_xoff++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002914 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002915 netif_tx_wake_queue(txq);
2916 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002917 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002918
2919 return NETDEV_TX_OK;
2920}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002921
Ariel Elior6383c0b2011-07-14 08:31:57 +00002922/**
2923 * bnx2x_setup_tc - routine to configure net_device for multi tc
2924 *
 2925 * @dev: net device to configure
 2926 * @num_tc: number of traffic classes to enable
2927 *
2928 * callback connected to the ndo_setup_tc function pointer
2929 */
2930int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2931{
2932 int cos, prio, count, offset;
2933 struct bnx2x *bp = netdev_priv(dev);
2934
2935 /* setup tc must be called under rtnl lock */
2936 ASSERT_RTNL();
2937
2938 /* no traffic classes requested. aborting */
2939 if (!num_tc) {
2940 netdev_reset_tc(dev);
2941 return 0;
2942 }
2943
2944 /* requested to support too many traffic classes */
2945 if (num_tc > bp->max_cos) {
2946 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
Joe Perches94f05b02011-08-14 12:16:20 +00002947 " requested: %d. max supported is %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002948 num_tc, bp->max_cos);
2949 return -EINVAL;
2950 }
2951
2952 /* declare amount of supported traffic classes */
2953 if (netdev_set_num_tc(dev, num_tc)) {
Joe Perches94f05b02011-08-14 12:16:20 +00002954 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002955 num_tc);
2956 return -EINVAL;
2957 }
2958
2959 /* configure priority to traffic class mapping */
2960 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2961 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Joe Perches94f05b02011-08-14 12:16:20 +00002962 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002963 prio, bp->prio_to_cos[prio]);
2964 }
2965
2966
 2967	/* Use this configuration to differentiate tc0 from other COSes
 2968	   This can be used for ets or pfc, and saves the effort of setting
 2969	   up a multi-class queue disc or negotiating DCBX with a switch
2970 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00002971 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002972 for (prio = 1; prio < 16; prio++) {
2973 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00002974 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002975 } */
2976
2977 /* configure traffic class to transmission queue mapping */
2978 for (cos = 0; cos < bp->max_cos; cos++) {
2979 count = BNX2X_NUM_ETH_QUEUES(bp);
2980 offset = cos * MAX_TXQS_PER_COS;
2981 netdev_set_tc_queue(dev, cos, count, offset);
Joe Perches94f05b02011-08-14 12:16:20 +00002982 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002983 cos, offset, count);
2984 }
2985
2986 return 0;
2987}
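/* Usage sketch (illustrative, not taken from this file): bnx2x_setup_tc() is
 * reached through the ndo_setup_tc callback, typically when a multi-TC qdisc
 * such as mqprio is configured on the interface, e.g.
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3
 *
 * where "eth0" and "3" are example values only. Note that the priority to
 * traffic-class mapping applied above comes from bp->prio_to_cos, not from
 * the qdisc.
 */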
2988
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002989/* called with rtnl_lock */
2990int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2991{
2992 struct sockaddr *addr = p;
2993 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002994 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002995
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00002996 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002997 return -EINVAL;
2998
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00002999#ifdef BCM_CNIC
3000 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
3001 return -EINVAL;
3002#endif
3003
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003004 if (netif_running(dev)) {
3005 rc = bnx2x_set_eth_mac(bp, false);
3006 if (rc)
3007 return rc;
3008 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003009
Danny Kukawka7ce5d222012-02-15 06:45:40 +00003010 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003011 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3012
3013 if (netif_running(dev))
3014 rc = bnx2x_set_eth_mac(bp, true);
3015
3016 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003017}
3018
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003019static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3020{
3021 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3022 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003023 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003024
3025 /* Common */
3026#ifdef BCM_CNIC
3027 if (IS_FCOE_IDX(fp_index)) {
3028 memset(sb, 0, sizeof(union host_hc_status_block));
3029 fp->status_blk_mapping = 0;
3030
3031 } else {
3032#endif
3033 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003034 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003035 BNX2X_PCI_FREE(sb->e2_sb,
3036 bnx2x_fp(bp, fp_index,
3037 status_blk_mapping),
3038 sizeof(struct host_hc_status_block_e2));
3039 else
3040 BNX2X_PCI_FREE(sb->e1x_sb,
3041 bnx2x_fp(bp, fp_index,
3042 status_blk_mapping),
3043 sizeof(struct host_hc_status_block_e1x));
3044#ifdef BCM_CNIC
3045 }
3046#endif
3047 /* Rx */
3048 if (!skip_rx_queue(bp, fp_index)) {
3049 bnx2x_free_rx_bds(fp);
3050
3051 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3052 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3053 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3054 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3055 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3056
3057 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3058 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3059 sizeof(struct eth_fast_path_rx_cqe) *
3060 NUM_RCQ_BD);
3061
3062 /* SGE ring */
3063 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3064 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3065 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3066 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3067 }
3068
3069 /* Tx */
3070 if (!skip_tx_queue(bp, fp_index)) {
3071 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003072 for_each_cos_in_tx_queue(fp, cos) {
3073 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3074
3075 DP(BNX2X_MSG_SP,
Joe Perches94f05b02011-08-14 12:16:20 +00003076 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003077 fp_index, cos, txdata->cid);
3078
3079 BNX2X_FREE(txdata->tx_buf_ring);
3080 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3081 txdata->tx_desc_mapping,
3082 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3083 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003084 }
3085 /* end of fastpath */
3086}
3087
3088void bnx2x_free_fp_mem(struct bnx2x *bp)
3089{
3090 int i;
3091 for_each_queue(bp, i)
3092 bnx2x_free_fp_mem_at(bp, i);
3093}
3094
3095static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3096{
3097 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003098 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003099 bnx2x_fp(bp, index, sb_index_values) =
3100 (__le16 *)status_blk.e2_sb->sb.index_values;
3101 bnx2x_fp(bp, index, sb_running_index) =
3102 (__le16 *)status_blk.e2_sb->sb.running_index;
3103 } else {
3104 bnx2x_fp(bp, index, sb_index_values) =
3105 (__le16 *)status_blk.e1x_sb->sb.index_values;
3106 bnx2x_fp(bp, index, sb_running_index) =
3107 (__le16 *)status_blk.e1x_sb->sb.running_index;
3108 }
3109}
3110
3111static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3112{
3113 union host_hc_status_block *sb;
3114 struct bnx2x_fastpath *fp = &bp->fp[index];
3115 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003116 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003117 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003118
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003119#ifdef BCM_CNIC
Dmitry Kravkov1fdf1552012-01-23 07:31:54 +00003120 if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003121 rx_ring_size = MIN_RX_SIZE_NONTPA;
3122 bp->rx_ring_size = rx_ring_size;
3123 } else
3124#endif
David S. Miller8decf862011-09-22 03:23:13 -04003125 if (!bp->rx_ring_size) {
Mintz Yuvald760fc32012-02-15 02:10:28 +00003126 u32 cfg = SHMEM_RD(bp,
3127 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003128
David S. Miller8decf862011-09-22 03:23:13 -04003129 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3130
Mintz Yuvald760fc32012-02-15 02:10:28 +00003131		/* Decrease ring size for 1G functions */
3132 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3133 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3134 rx_ring_size /= 10;
3135
David S. Miller8decf862011-09-22 03:23:13 -04003136 /* allocate at least number of buffers required by FW */
3137 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3138 MIN_RX_SIZE_TPA, rx_ring_size);
3139
3140 bp->rx_ring_size = rx_ring_size;
Dmitry Kravkov614c76d2011-11-28 12:31:49 +00003141 } else /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003142 rx_ring_size = bp->rx_ring_size;
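	/* Worked example for the sizing above, with made-up numbers (the real
	 * constants live in bnx2x.h): if MAX_RX_AVAIL were 8192 and the
	 * device ran 4 RX queues, the default would be 2048 BDs per ring; on
	 * a 1G SGMII port that is further divided by 10 (~204) and then
	 * raised back to MIN_RX_SIZE_TPA/MIN_RX_SIZE_NONTPA if it fell below
	 * the FW minimum.
	 */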
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003143
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003144 /* Common */
3145 sb = &bnx2x_fp(bp, index, status_blk);
3146#ifdef BCM_CNIC
3147 if (!IS_FCOE_IDX(index)) {
3148#endif
3149 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003150 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003151 BNX2X_PCI_ALLOC(sb->e2_sb,
3152 &bnx2x_fp(bp, index, status_blk_mapping),
3153 sizeof(struct host_hc_status_block_e2));
3154 else
3155 BNX2X_PCI_ALLOC(sb->e1x_sb,
3156 &bnx2x_fp(bp, index, status_blk_mapping),
3157 sizeof(struct host_hc_status_block_e1x));
3158#ifdef BCM_CNIC
3159 }
3160#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003161
3162 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3163 * set shortcuts for it.
3164 */
3165 if (!IS_FCOE_IDX(index))
3166 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003167
3168 /* Tx */
3169 if (!skip_tx_queue(bp, index)) {
3170 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003171 for_each_cos_in_tx_queue(fp, cos) {
3172 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3173
3174 DP(BNX2X_MSG_SP, "allocating tx memory of "
Joe Perches94f05b02011-08-14 12:16:20 +00003175 "fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003176 index, cos);
3177
3178 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003179 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003180 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3181 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003182 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003183 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003184 }
3185
3186 /* Rx */
3187 if (!skip_rx_queue(bp, index)) {
3188 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3189 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3190 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3191 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3192 &bnx2x_fp(bp, index, rx_desc_mapping),
3193 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3194
3195 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3196 &bnx2x_fp(bp, index, rx_comp_mapping),
3197 sizeof(struct eth_fast_path_rx_cqe) *
3198 NUM_RCQ_BD);
3199
3200 /* SGE ring */
3201 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3202 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3203 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3204 &bnx2x_fp(bp, index, rx_sge_mapping),
3205 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3206 /* RX BD ring */
3207 bnx2x_set_next_page_rx_bd(fp);
3208
3209 /* CQ ring */
3210 bnx2x_set_next_page_rx_cq(fp);
3211
3212 /* BDs */
3213 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3214 if (ring_size < rx_ring_size)
3215 goto alloc_mem_err;
3216 }
3217
3218 return 0;
3219
3220/* handles low memory cases */
3221alloc_mem_err:
3222 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3223 index, ring_size);
 3224	/* FW will drop all packets if the queue is not big enough.
 3225	 * In these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003226 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003227 */
3228 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003229 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003230 /* release memory allocated for this queue */
3231 bnx2x_free_fp_mem_at(bp, index);
3232 return -ENOMEM;
3233 }
3234 return 0;
3235}
3236
3237int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3238{
3239 int i;
3240
3241 /**
3242 * 1. Allocate FP for leading - fatal if error
3243 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003244 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3245 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003246 */
3247
3248 /* leading */
3249 if (bnx2x_alloc_fp_mem_at(bp, 0))
3250 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003251
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003252#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003253 if (!NO_FCOE(bp))
3254 /* FCoE */
3255 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3256 /* we will fail load process instead of mark
3257 * NO_FCOE_FLAG
3258 */
3259 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003260#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003261
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003262 /* RSS */
3263 for_each_nondefault_eth_queue(bp, i)
3264 if (bnx2x_alloc_fp_mem_at(bp, i))
3265 break;
3266
3267 /* handle memory failures */
3268 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3269 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3270
3271 WARN_ON(delta < 0);
3272#ifdef BCM_CNIC
3273 /**
3274 * move non eth FPs next to last eth FP
3275 * must be done in that order
3276 * FCOE_IDX < FWD_IDX < OOO_IDX
3277 */
3278
Ariel Elior6383c0b2011-07-14 08:31:57 +00003279		/* move FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003280 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3281#endif
3282 bp->num_queues -= delta;
3283 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3284 bp->num_queues + delta, bp->num_queues);
3285 }
3286
3287 return 0;
3288}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003289
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003290void bnx2x_free_mem_bp(struct bnx2x *bp)
3291{
3292 kfree(bp->fp);
3293 kfree(bp->msix_table);
3294 kfree(bp->ilt);
3295}
3296
3297int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3298{
3299 struct bnx2x_fastpath *fp;
3300 struct msix_entry *tbl;
3301 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003302 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003303
Ariel Elior6383c0b2011-07-14 08:31:57 +00003304 /*
 3305	 * The biggest MSI-X table we might need is the maximum number of fast
 3306	 * path IGU SBs plus the default SB (for the PF).
3307 */
3308 msix_table_size = bp->igu_sb_cnt + 1;
3309
3310 /* fp array: RSS plus CNIC related L2 queues */
Thomas Meyer01e23742011-11-29 11:08:00 +00003311 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
Ariel Elior6383c0b2011-07-14 08:31:57 +00003312 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003313 if (!fp)
3314 goto alloc_err;
3315 bp->fp = fp;
3316
3317 /* msix table */
Thomas Meyer01e23742011-11-29 11:08:00 +00003318 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003319 if (!tbl)
3320 goto alloc_err;
3321 bp->msix_table = tbl;
3322
3323 /* ilt */
3324 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3325 if (!ilt)
3326 goto alloc_err;
3327 bp->ilt = ilt;
3328
3329 return 0;
3330alloc_err:
3331 bnx2x_free_mem_bp(bp);
3332 return -ENOMEM;
3333
3334}
3335
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003336int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003337{
3338 struct bnx2x *bp = netdev_priv(dev);
3339
3340 if (unlikely(!netif_running(dev)))
3341 return 0;
3342
3343 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3344 return bnx2x_nic_load(bp, LOAD_NORMAL);
3345}
3346
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003347int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3348{
3349 u32 sel_phy_idx = 0;
3350 if (bp->link_params.num_phys <= 1)
3351 return INT_PHY;
3352
3353 if (bp->link_vars.link_up) {
3354 sel_phy_idx = EXT_PHY1;
3355 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3356 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3357 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3358 sel_phy_idx = EXT_PHY2;
3359 } else {
3360
3361 switch (bnx2x_phy_selection(&bp->link_params)) {
3362 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3363 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3364 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3365 sel_phy_idx = EXT_PHY1;
3366 break;
3367 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3368 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3369 sel_phy_idx = EXT_PHY2;
3370 break;
3371 }
3372 }
3373
3374 return sel_phy_idx;
3375
3376}
3377int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3378{
3379 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3380 /*
 3381	 * The selected active PHY is always the one after swapping (in case PHY
 3382	 * swapping is enabled). So when swapping is enabled, we need to reverse
 3383	 * the configuration.
3384 */
3385
3386 if (bp->link_params.multi_phy_config &
3387 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3388 if (sel_phy_idx == EXT_PHY1)
3389 sel_phy_idx = EXT_PHY2;
3390 else if (sel_phy_idx == EXT_PHY2)
3391 sel_phy_idx = EXT_PHY1;
3392 }
3393 return LINK_CONFIG_IDX(sel_phy_idx);
3394}
3395
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003396#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3397int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3398{
3399 struct bnx2x *bp = netdev_priv(dev);
3400 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3401
3402 switch (type) {
3403 case NETDEV_FCOE_WWNN:
3404 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3405 cp->fcoe_wwn_node_name_lo);
3406 break;
3407 case NETDEV_FCOE_WWPN:
3408 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3409 cp->fcoe_wwn_port_name_lo);
3410 break;
3411 default:
3412 return -EINVAL;
3413 }
3414
3415 return 0;
3416}
3417#endif
3418
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003419/* called with rtnl_lock */
3420int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3421{
3422 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003423
3424 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00003425 netdev_err(dev, "Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003426 return -EAGAIN;
3427 }
3428
3429 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3430 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3431 return -EINVAL;
3432
3433 /* This does not race with packet allocation
3434 * because the actual alloc size is
3435 * only updated as part of load
3436 */
3437 dev->mtu = new_mtu;
3438
Michał Mirosław66371c42011-04-12 09:38:23 +00003439 return bnx2x_reload_if_running(dev);
3440}
3441
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003442netdev_features_t bnx2x_fix_features(struct net_device *dev,
3443 netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003444{
3445 struct bnx2x *bp = netdev_priv(dev);
3446
3447 /* TPA requires Rx CSUM offloading */
3448 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3449 features &= ~NETIF_F_LRO;
3450
3451 return features;
3452}
3453
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003454int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
Michał Mirosław66371c42011-04-12 09:38:23 +00003455{
3456 struct bnx2x *bp = netdev_priv(dev);
3457 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003458 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003459
3460 if (features & NETIF_F_LRO)
3461 flags |= TPA_ENABLE_FLAG;
3462 else
3463 flags &= ~TPA_ENABLE_FLAG;
3464
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003465 if (features & NETIF_F_LOOPBACK) {
3466 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3467 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3468 bnx2x_reload = true;
3469 }
3470 } else {
3471 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3472 bp->link_params.loopback_mode = LOOPBACK_NONE;
3473 bnx2x_reload = true;
3474 }
3475 }
3476
Michał Mirosław66371c42011-04-12 09:38:23 +00003477 if (flags ^ bp->flags) {
3478 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003479 bnx2x_reload = true;
3480 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003481
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003482 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003483 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3484 return bnx2x_reload_if_running(dev);
3485 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003486 }
3487
Michał Mirosław66371c42011-04-12 09:38:23 +00003488 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003489}
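/* Usage sketch (illustrative): these hooks are driven by the core netdev
 * feature machinery, e.g. "ethtool -K eth0 lro on" (device name is an
 * example) lands in bnx2x_fix_features()/bnx2x_set_features() above, where
 * NETIF_F_LRO is mapped to TPA_ENABLE_FLAG and a change may reload the NIC.
 */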
3490
3491void bnx2x_tx_timeout(struct net_device *dev)
3492{
3493 struct bnx2x *bp = netdev_priv(dev);
3494
3495#ifdef BNX2X_STOP_ON_ERROR
3496 if (!bp->panic)
3497 bnx2x_panic();
3498#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003499
3500 smp_mb__before_clear_bit();
3501 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3502 smp_mb__after_clear_bit();
3503
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003504 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003505 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003506}
3507
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003508int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3509{
3510 struct net_device *dev = pci_get_drvdata(pdev);
3511 struct bnx2x *bp;
3512
3513 if (!dev) {
3514 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3515 return -ENODEV;
3516 }
3517 bp = netdev_priv(dev);
3518
3519 rtnl_lock();
3520
3521 pci_save_state(pdev);
3522
3523 if (!netif_running(dev)) {
3524 rtnl_unlock();
3525 return 0;
3526 }
3527
3528 netif_device_detach(dev);
3529
3530 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3531
3532 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3533
3534 rtnl_unlock();
3535
3536 return 0;
3537}
3538
3539int bnx2x_resume(struct pci_dev *pdev)
3540{
3541 struct net_device *dev = pci_get_drvdata(pdev);
3542 struct bnx2x *bp;
3543 int rc;
3544
3545 if (!dev) {
3546 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3547 return -ENODEV;
3548 }
3549 bp = netdev_priv(dev);
3550
3551 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Ariel Elior95c6c6162012-01-26 06:01:52 +00003552 netdev_err(dev, "Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003553 return -EAGAIN;
3554 }
3555
3556 rtnl_lock();
3557
3558 pci_restore_state(pdev);
3559
3560 if (!netif_running(dev)) {
3561 rtnl_unlock();
3562 return 0;
3563 }
3564
3565 bnx2x_set_power_state(bp, PCI_D0);
3566 netif_device_attach(dev);
3567
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003568 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3569
3570 rtnl_unlock();
3571
3572 return rc;
3573}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003574
3575
3576void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3577 u32 cid)
3578{
3579 /* ustorm cxt validation */
3580 cxt->ustorm_ag_context.cdu_usage =
3581 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3582 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3583 /* xcontext validation */
3584 cxt->xstorm_ag_context.cdu_reserved =
3585 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3586 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3587}
3588
3589static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3590 u8 fw_sb_id, u8 sb_index,
3591 u8 ticks)
3592{
3593
3594 u32 addr = BAR_CSTRORM_INTMEM +
3595 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3596 REG_WR8(bp, addr, ticks);
3597 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3598 port, fw_sb_id, sb_index, ticks);
3599}
3600
3601static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3602 u16 fw_sb_id, u8 sb_index,
3603 u8 disable)
3604{
3605 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3606 u32 addr = BAR_CSTRORM_INTMEM +
3607 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3608 u16 flags = REG_RD16(bp, addr);
3609 /* clear and set */
3610 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3611 flags |= enable_flag;
3612 REG_WR16(bp, addr, flags);
3613 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3614 port, fw_sb_id, sb_index, disable);
3615}
3616
3617void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3618 u8 sb_index, u8 disable, u16 usec)
3619{
3620 int port = BP_PORT(bp);
3621 u8 ticks = usec / BNX2X_BTR;
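	/* Illustration, assuming BNX2X_BTR is the HC tick resolution in
	 * microseconds (see bnx2x.h): with a 4 usec tick, a 48 usec request
	 * would program 12 ticks; usec == 0 takes the 'disable' path below.
	 */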
3622
3623 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3624
3625 disable = disable ? 1 : (usec ? 0 : 1);
3626 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3627}