Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Dmitry Kravkov5de92402011-05-04 23:51:13 +00003 * Copyright (c) 2007-2011 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Joe Perchesf1deab52011-08-14 12:16:21 +000018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000021#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000022#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000023#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000024#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070025#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000026#include <linux/firmware.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000027#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000028#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000029#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000030#include "bnx2x_sp.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000031
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030032
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000033
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000034/**
35 * bnx2x_bz_fp - zero content of the fastpath structure.
36 *
37 * @bp: driver handle
38 * @index: fastpath index to be zeroed
39 *
40 * Makes sure the contents of bp->fp[index].napi are kept
41 * intact.
42 */
43static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
44{
45 struct bnx2x_fastpath *fp = &bp->fp[index];
46 struct napi_struct orig_napi = fp->napi;
47 /* bzero bnx2x_fastpath contents */
48 memset(fp, 0, sizeof(*fp));
49
50 /* Restore the NAPI object as it has been already initialized */
51 fp->napi = orig_napi;
Ariel Elior6383c0b2011-07-14 08:31:57 +000052
53 fp->bp = bp;
54 fp->index = index;
55 if (IS_ETH_FP(fp))
56 fp->max_cos = bp->max_cos;
57 else
58 /* Special queues support only one CoS */
59 fp->max_cos = 1;
60
61 /*
62 * set the tpa flag for each queue. The tpa flag determines the queue
63 * minimal size so it must be set prior to queue memory allocation
64 */
65 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
66
67#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -070068 /* We don't want TPA on an FCoE L2 ring */
69 if (IS_FCOE_FP(fp))
70 fp->disable_tpa = 1;
Ariel Elior6383c0b2011-07-14 08:31:57 +000071#endif
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000072}
73
74/**
75 * bnx2x_move_fp - move content of the fastpath structure.
76 *
77 * @bp: driver handle
78 * @from: source FP index
79 * @to: destination FP index
80 *
81 * Makes sure the contents of bp->fp[to].napi are kept
82 * intact.
83 */
84static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
85{
86 struct bnx2x_fastpath *from_fp = &bp->fp[from];
87 struct bnx2x_fastpath *to_fp = &bp->fp[to];
88 struct napi_struct orig_napi = to_fp->napi;
89 /* Move bnx2x_fastpath contents */
90 memcpy(to_fp, from_fp, sizeof(*to_fp));
91 to_fp->index = to;
92
93 /* Restore the NAPI object as it has been already initialized */
94 to_fp->napi = orig_napi;
95}
96
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030097int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
98
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000099/* free skb in the packet ring at pos idx
100 * return idx of last bd freed
101 */
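/* Overview: each transmitted packet occupies a chain of BDs - a start BD,
 * a parse BD, an optional TSO split header BD (neither of the latter two
 * carries a DMA mapping) and one data BD per fragment. The helper below
 * unmaps the mapped BDs, frees the skb and returns the consumer index
 * following the last freed BD.
 */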
Ariel Elior6383c0b2011-07-14 08:31:57 +0000102static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000103 u16 idx)
104{
Ariel Elior6383c0b2011-07-14 08:31:57 +0000105 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000106 struct eth_tx_start_bd *tx_start_bd;
107 struct eth_tx_bd *tx_data_bd;
108 struct sk_buff *skb = tx_buf->skb;
109 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
110 int nbd;
111
112 /* prefetch skb end pointer to speed up dev_kfree_skb() */
113 prefetch(&skb->end);
114
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300115 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000116 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000117
118 /* unmap first bd */
119 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000120 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000121 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +0000122 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000123
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300124
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000125 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
126#ifdef BNX2X_STOP_ON_ERROR
127 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
128 BNX2X_ERR("BAD nbd!\n");
129 bnx2x_panic();
130 }
131#endif
132 new_cons = nbd + tx_buf->first_bd;
133
134 /* Get the next bd */
135 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
136
137 /* Skip a parse bd... */
138 --nbd;
139 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
140
141 /* ...and the TSO split header bd since they have no mapping */
142 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
143 --nbd;
144 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
145 }
146
147 /* now free frags */
148 while (nbd > 0) {
149
150 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000151 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000152 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
153 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
154 if (--nbd)
155 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
156 }
157
158 /* release skb */
159 WARN_ON(!skb);
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000160 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000161 tx_buf->first_bd = 0;
162 tx_buf->skb = NULL;
163
164 return new_cons;
165}
166
Ariel Elior6383c0b2011-07-14 08:31:57 +0000167int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000168{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000169 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000170 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000171
172#ifdef BNX2X_STOP_ON_ERROR
173 if (unlikely(bp->panic))
174 return -1;
175#endif
176
Ariel Elior6383c0b2011-07-14 08:31:57 +0000177 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
178 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
179 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000180
181 while (sw_cons != hw_cons) {
182 u16 pkt_cons;
183
184 pkt_cons = TX_BD(sw_cons);
185
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000186 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
187 " pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000188 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000189
Ariel Elior6383c0b2011-07-14 08:31:57 +0000190 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000191 sw_cons++;
192 }
193
Ariel Elior6383c0b2011-07-14 08:31:57 +0000194 txdata->tx_pkt_cons = sw_cons;
195 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000196
197 /* Need to make the tx_bd_cons update visible to start_xmit()
198 * before checking for netif_tx_queue_stopped(). Without the
199 * memory barrier, there is a small possibility that
200 * start_xmit() will miss it and cause the queue to be stopped
201 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300202 * On the other hand we need an rmb() here to ensure the proper
203 * ordering of bit testing in the following
204 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000205 */
206 smp_mb();
207
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000208 if (unlikely(netif_tx_queue_stopped(txq))) {
209 /* Taking tx_lock() is needed to prevent reenabling the queue
210 * while it's empty. This could have happen if rx_action() gets
211 * suspended in bnx2x_tx_int() after the condition before
212 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
213 *
214 * stops the queue->sees fresh tx_bd_cons->releases the queue->
215 * sends some packets consuming the whole queue again->
216 * stops the queue
217 */
218
219 __netif_tx_lock(txq, smp_processor_id());
220
221 if ((netif_tx_queue_stopped(txq)) &&
222 (bp->state == BNX2X_STATE_OPEN) &&
Ariel Elior6383c0b2011-07-14 08:31:57 +0000223 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000224 netif_tx_wake_queue(txq);
225
226 __netif_tx_unlock(txq);
227 }
228 return 0;
229}
230
231static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
232 u16 idx)
233{
234 u16 last_max = fp->last_max_sge;
235
236 if (SUB_S16(idx, last_max) > 0)
237 fp->last_max_sge = idx;
238}
239
240static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
241 struct eth_fast_path_rx_cqe *fp_cqe)
242{
243 struct bnx2x *bp = fp->bp;
244 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
245 le16_to_cpu(fp_cqe->len_on_bd)) >>
246 SGE_PAGE_SHIFT;
247 u16 last_max, last_elem, first_elem;
248 u16 delta = 0;
249 u16 i;
250
251 if (!sge_len)
252 return;
253
254 /* First mark all used pages */
255 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300256 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000257 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000258
259 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000260 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000261
262 /* Here we assume that the last SGE index is the biggest */
263 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000264 bnx2x_update_last_max_sge(fp,
265 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000266
267 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300268 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
269 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000270
271 /* If ring is not full */
272 if (last_elem + 1 != first_elem)
273 last_elem++;
274
275 /* Now update the prod */
276 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
277 if (likely(fp->sge_mask[i]))
278 break;
279
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300280 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
281 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000282 }
283
284 if (delta > 0) {
285 fp->rx_sge_prod += delta;
286 /* clear page-end entries */
287 bnx2x_clear_sge_mask_next_elems(fp);
288 }
289
290 DP(NETIF_MSG_RX_STATUS,
291 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
292 fp->last_max_sge, fp->rx_sge_prod);
293}
294
295static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300296 struct sk_buff *skb, u16 cons, u16 prod,
297 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000298{
299 struct bnx2x *bp = fp->bp;
300 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
301 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
302 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
303 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300304 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
305 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000306
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300307 /* print error if current state != stop */
308 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000309 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
310
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300311 /* Try to map an empty skb from the aggregation info */
312 mapping = dma_map_single(&bp->pdev->dev,
313 first_buf->skb->data,
314 fp->rx_buf_size, DMA_FROM_DEVICE);
315 /*
316 * ...if it fails - move the skb from the consumer to the producer
317 * and set the current aggregation state as ERROR to drop it
318 * when TPA_STOP arrives.
319 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000320
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300321 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
322 /* Move the BD from the consumer to the producer */
323 bnx2x_reuse_rx_skb(fp, cons, prod);
324 tpa_info->tpa_state = BNX2X_TPA_ERROR;
325 return;
326 }
327
328 /* move empty skb from pool to prod */
329 prod_rx_buf->skb = first_buf->skb;
330 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000331 /* point prod_bd to new skb */
332 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
333 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
334
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300335 /* move partial skb from cons to pool (don't unmap yet) */
336 *first_buf = *cons_rx_buf;
337
338 /* mark bin state as START */
339 tpa_info->parsing_flags =
340 le16_to_cpu(cqe->pars_flags.flags);
341 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
342 tpa_info->tpa_state = BNX2X_TPA_START;
343 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
344 tpa_info->placement_offset = cqe->placement_offset;
345
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000346#ifdef BNX2X_STOP_ON_ERROR
347 fp->tpa_queue_used |= (1 << queue);
348#ifdef _ASM_GENERIC_INT_L64_H
349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
350#else
351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
352#endif
353 fp->tpa_queue_used);
354#endif
355}
356
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000357/* Timestamp option length allowed for TPA aggregation:
358 *
359 * nop nop kind length echo val
360 */
361#define TPA_TSTAMP_OPT_LEN 12
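/* Worked example for bnx2x_set_lro_mss() below (assumed numbers, for
 * illustration only): an IPv4 aggregation with TCP timestamps and
 * len_on_bd = 1514 gives hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12
 * (timestamp) = 66, so the MSS reported via gso_size is 1514 - 66 = 1448.
 */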
362/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000363 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000364 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000365 * @bp: driver handle
366 * @parsing_flags: parsing flags from the START CQE
367 * @len_on_bd: total length of the first packet for the
368 * aggregation.
369 *
370 * Approximate value of the MSS for this aggregation calculated using
371 * its first packet.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000372 */
373static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
374 u16 len_on_bd)
375{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300376 /*
377 * TPA aggregation won't have either IP options or TCP options
378 * other than timestamp or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000379 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300380 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
381
382 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
383 PRS_FLAG_OVERETH_IPV6)
384 hdrs_len += sizeof(struct ipv6hdr);
385 else /* IPv4 */
386 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000387
388
389 /* Check if there was a TCP timestamp; if there was, it will
390 * always be 12 bytes long: nop nop kind length echo val.
391 *
392 * Otherwise FW would close the aggregation.
393 */
394 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
395 hdrs_len += TPA_TSTAMP_OPT_LEN;
396
397 return len_on_bd - hdrs_len;
398}
399
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000400static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300401 u16 queue, struct sk_buff *skb,
402 struct eth_end_agg_rx_cqe *cqe,
403 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000404{
405 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000406 u32 i, frag_len, frag_size, pages;
407 int err;
408 int j;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300409 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
410 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000411
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300412 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000413 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
414
415 /* This is needed in order to enable forwarding support */
416 if (frag_size)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300417 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
418 tpa_info->parsing_flags, len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000419
420#ifdef BNX2X_STOP_ON_ERROR
421 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
422 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
423 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300424 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000425 bnx2x_panic();
426 return -EINVAL;
427 }
428#endif
429
430 /* Run through the SGL and compose the fragmented skb */
431 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300432 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000433
434 /* FW gives the indices of the SGE as if the ring is an array
435 (meaning that "next" element will consume 2 indices) */
436 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
437 rx_pg = &fp->rx_page_ring[sge_idx];
438 old_rx_pg = *rx_pg;
439
440 /* If we fail to allocate a substitute page, we simply stop
441 where we are and drop the whole packet */
442 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
443 if (unlikely(err)) {
444 fp->eth_q_stats.rx_skb_alloc_failed++;
445 return err;
446 }
447
448 /* Unmap the page as we are going to pass it to the stack */
449 dma_unmap_page(&bp->pdev->dev,
450 dma_unmap_addr(&old_rx_pg, mapping),
451 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
452
453 /* Add one frag and update the appropriate fields in the skb */
454 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
455
456 skb->data_len += frag_len;
457 skb->truesize += frag_len;
458 skb->len += frag_len;
459
460 frag_size -= frag_len;
461 }
462
463 return 0;
464}
465
466static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300467 u16 queue, struct eth_end_agg_rx_cqe *cqe,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000468 u16 cqe_idx)
469{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300470 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
471 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
472 u8 pad = tpa_info->placement_offset;
473 u16 len = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000474 struct sk_buff *skb = rx_buf->skb;
475 /* alloc new skb */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300476 struct sk_buff *new_skb;
477 u8 old_tpa_state = tpa_info->tpa_state;
478
479 tpa_info->tpa_state = BNX2X_TPA_STOP;
480
481 /* If there was an error during the handling of the TPA_START -
482 * drop this aggregation.
483 */
484 if (old_tpa_state == BNX2X_TPA_ERROR)
485 goto drop;
486
487 /* Try to allocate the new skb */
488 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000489
490 /* Unmap skb in the pool anyway, as we are going to change
491 pool entry status to BNX2X_TPA_STOP even if new skb allocation
492 fails. */
493 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800494 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000495
496 if (likely(new_skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000497 prefetch(skb);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000498 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000499
500#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800501 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000502 BNX2X_ERR("skb_put is about to fail... "
503 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800504 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000505 bnx2x_panic();
506 return;
507 }
508#endif
509
510 skb_reserve(skb, pad);
511 skb_put(skb, len);
512
513 skb->protocol = eth_type_trans(skb, bp->dev);
514 skb->ip_summed = CHECKSUM_UNNECESSARY;
515
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300516 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
517 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
518 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
Hao Zheng9bcc0892010-10-20 13:56:11 +0000519 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000520 } else {
521 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
522 " - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000523 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000524 }
525
526
527 /* put new skb in bin */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300528 rx_buf->skb = new_skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000529
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300530 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000531 }
532
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300533drop:
534 /* drop the packet and keep the buffer in the bin */
535 DP(NETIF_MSG_RX_STATUS,
536 "Failed to allocate or map a new skb - dropping packet!\n");
537 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000538}
539
540/* Set Toeplitz hash value in the skb using the value from the
541 * CQE (calculated by HW).
542 */
543static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
544 struct sk_buff *skb)
545{
546 /* Set Toeplitz hash from CQE */
547 if ((bp->dev->features & NETIF_F_RXHASH) &&
548 (cqe->fast_path_cqe.status_flags &
549 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
550 skb->rxhash =
551 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
552}
553
554int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
555{
556 struct bnx2x *bp = fp->bp;
557 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
558 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
559 int rx_pkt = 0;
560
561#ifdef BNX2X_STOP_ON_ERROR
562 if (unlikely(bp->panic))
563 return 0;
564#endif
565
566 /* CQ "next element" is of the size of the regular element,
567 that's why it's ok here */
568 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
569 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
570 hw_comp_cons++;
571
572 bd_cons = fp->rx_bd_cons;
573 bd_prod = fp->rx_bd_prod;
574 bd_prod_fw = bd_prod;
575 sw_comp_cons = fp->rx_comp_cons;
576 sw_comp_prod = fp->rx_comp_prod;
577
578 /* Memory barrier necessary as speculative reads of the rx
579 * buffer can be ahead of the index in the status block
580 */
581 rmb();
582
583 DP(NETIF_MSG_RX_STATUS,
584 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
585 fp->index, hw_comp_cons, sw_comp_cons);
586
587 while (sw_comp_cons != hw_comp_cons) {
588 struct sw_rx_bd *rx_buf = NULL;
589 struct sk_buff *skb;
590 union eth_rx_cqe *cqe;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300591 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000592 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300593 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000594 u16 len, pad;
595
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300596#ifdef BNX2X_STOP_ON_ERROR
597 if (unlikely(bp->panic))
598 return 0;
599#endif
600
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000601 comp_ring_cons = RCQ_BD(sw_comp_cons);
602 bd_prod = RX_BD(bd_prod);
603 bd_cons = RX_BD(bd_cons);
604
605 /* Prefetch the page containing the BD descriptor
606 at producer's index. It will be needed when new skb is
607 allocated */
608 prefetch((void *)(PAGE_ALIGN((unsigned long)
609 (&fp->rx_desc_ring[bd_prod])) -
610 PAGE_SIZE + 1));
611
612 cqe = &fp->rx_comp_ring[comp_ring_cons];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300613 cqe_fp = &cqe->fast_path_cqe;
614 cqe_fp_flags = cqe_fp->type_error_flags;
615 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000616
617 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
618 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300619 cqe_fp_flags, cqe_fp->status_flags,
620 le32_to_cpu(cqe_fp->rss_hash_result),
621 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000622
623 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300624 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000625 bnx2x_sp_event(fp, cqe);
626 goto next_cqe;
627
628 /* this is an rx packet */
629 } else {
630 rx_buf = &fp->rx_buf_ring[bd_cons];
631 skb = rx_buf->skb;
632 prefetch(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000633
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300634 if (!CQE_TYPE_FAST(cqe_fp_type)) {
635#ifdef BNX2X_STOP_ON_ERROR
636 /* sanity check */
637 if (fp->disable_tpa &&
638 (CQE_TYPE_START(cqe_fp_type) ||
639 CQE_TYPE_STOP(cqe_fp_type)))
640 BNX2X_ERR("START/STOP packet while "
641 "disable_tpa type %x\n",
642 CQE_TYPE(cqe_fp_type));
643#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000644
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300645 if (CQE_TYPE_START(cqe_fp_type)) {
646 u16 queue = cqe_fp->queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000647 DP(NETIF_MSG_RX_STATUS,
648 "calling tpa_start on queue %d\n",
649 queue);
650
651 bnx2x_tpa_start(fp, queue, skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300652 bd_cons, bd_prod,
653 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000654
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300655 /* Set Toeplitz hash for LRO skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000656 bnx2x_set_skb_rxhash(bp, cqe, skb);
657
658 goto next_rx;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300659
660 } else {
661 u16 queue =
662 cqe->end_agg_cqe.queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000663 DP(NETIF_MSG_RX_STATUS,
664 "calling tpa_stop on queue %d\n",
665 queue);
666
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300667 bnx2x_tpa_stop(bp, fp, queue,
668 &cqe->end_agg_cqe,
669 comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000670#ifdef BNX2X_STOP_ON_ERROR
671 if (bp->panic)
672 return 0;
673#endif
674
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300675 bnx2x_update_sge_prod(fp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000676 goto next_cqe;
677 }
678 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300679 /* non TPA */
680 len = le16_to_cpu(cqe_fp->pkt_len);
681 pad = cqe_fp->placement_offset;
Vladislav Zolotarov9924caf2011-07-19 01:37:42 +0000682 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000683 dma_unmap_addr(rx_buf, mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300684 pad + RX_COPY_THRESH,
685 DMA_FROM_DEVICE);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000686 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000687
688 /* is this an error packet? */
689 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
690 DP(NETIF_MSG_RX_ERR,
691 "ERROR flags %x rx packet %u\n",
692 cqe_fp_flags, sw_comp_cons);
693 fp->eth_q_stats.rx_err_discard_pkt++;
694 goto reuse_rx;
695 }
696
697 /* Since we don't have a jumbo ring,
698 * copy small packets if mtu > 1500
699 */
700 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
701 (len <= RX_COPY_THRESH)) {
702 struct sk_buff *new_skb;
703
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300704 new_skb = netdev_alloc_skb(bp->dev, len + pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000705 if (new_skb == NULL) {
706 DP(NETIF_MSG_RX_ERR,
707 "ERROR packet dropped "
708 "because of alloc failure\n");
709 fp->eth_q_stats.rx_skb_alloc_failed++;
710 goto reuse_rx;
711 }
712
713 /* aligned copy */
714 skb_copy_from_linear_data_offset(skb, pad,
715 new_skb->data + pad, len);
716 skb_reserve(new_skb, pad);
717 skb_put(new_skb, len);
718
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000719 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000720
721 skb = new_skb;
722
723 } else
724 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
725 dma_unmap_single(&bp->pdev->dev,
726 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800727 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000728 DMA_FROM_DEVICE);
729 skb_reserve(skb, pad);
730 skb_put(skb, len);
731
732 } else {
733 DP(NETIF_MSG_RX_ERR,
734 "ERROR packet dropped because "
735 "of alloc failure\n");
736 fp->eth_q_stats.rx_skb_alloc_failed++;
737reuse_rx:
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000738 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000739 goto next_rx;
740 }
741
742 skb->protocol = eth_type_trans(skb, bp->dev);
743
744 /* Set Toeplitz hash for a non-LRO skb */
745 bnx2x_set_skb_rxhash(bp, cqe, skb);
746
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700747 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000748
Michał Mirosław66371c42011-04-12 09:38:23 +0000749 if (bp->dev->features & NETIF_F_RXCSUM) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300750
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000751 if (likely(BNX2X_RX_CSUM_OK(cqe)))
752 skb->ip_summed = CHECKSUM_UNNECESSARY;
753 else
754 fp->eth_q_stats.hw_csum_err++;
755 }
756 }
757
758 skb_record_rx_queue(skb, fp->index);
759
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300760 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
761 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000762 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300763 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000764 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000765
766
767next_rx:
768 rx_buf->skb = NULL;
769
770 bd_cons = NEXT_RX_IDX(bd_cons);
771 bd_prod = NEXT_RX_IDX(bd_prod);
772 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
773 rx_pkt++;
774next_cqe:
775 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
776 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
777
778 if (rx_pkt == budget)
779 break;
780 } /* while */
781
782 fp->rx_bd_cons = bd_cons;
783 fp->rx_bd_prod = bd_prod_fw;
784 fp->rx_comp_cons = sw_comp_cons;
785 fp->rx_comp_prod = sw_comp_prod;
786
787 /* Update producers */
788 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
789 fp->rx_sge_prod);
790
791 fp->rx_pkt += rx_pkt;
792 fp->rx_calls++;
793
794 return rx_pkt;
795}
796
797static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
798{
799 struct bnx2x_fastpath *fp = fp_cookie;
800 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000801 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000802
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000803 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
804 "[fp %d fw_sd %d igusb %d]\n",
805 fp->index, fp->fw_sb_id, fp->igu_sb_id);
806 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000807
808#ifdef BNX2X_STOP_ON_ERROR
809 if (unlikely(bp->panic))
810 return IRQ_HANDLED;
811#endif
812
813 /* Handle Rx and Tx according to MSI-X vector */
814 prefetch(fp->rx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000815
816 for_each_cos_in_tx_queue(fp, cos)
817 prefetch(fp->txdata[cos].tx_cons_sb);
818
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000819 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000820 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
821
822 return IRQ_HANDLED;
823}
824
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000825/* HW Lock for shared dual port PHYs */
826void bnx2x_acquire_phy_lock(struct bnx2x *bp)
827{
828 mutex_lock(&bp->port.phy_mutex);
829
830 if (bp->port.need_hw_lock)
831 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
832}
833
834void bnx2x_release_phy_lock(struct bnx2x *bp)
835{
836 if (bp->port.need_hw_lock)
837 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
838
839 mutex_unlock(&bp->port.phy_mutex);
840}
841
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800842/* calculates MF speed according to current line speed and MF configuration */
843u16 bnx2x_get_mf_speed(struct bnx2x *bp)
844{
845 u16 line_speed = bp->link_vars.line_speed;
846 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000847 u16 maxCfg = bnx2x_extract_max_cfg(bp,
848 bp->mf_config[BP_VN(bp)]);
849
850 /* Calculate the current MAX line speed limit for the MF
851 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800852 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000853 if (IS_MF_SI(bp))
854 line_speed = (line_speed * maxCfg) / 100;
855 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800856 u16 vn_max_rate = maxCfg * 100;
857
858 if (vn_max_rate < line_speed)
859 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000860 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800861 }
862
863 return line_speed;
864}
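/* Example for bnx2x_get_mf_speed() above (assumed values, for illustration
 * only): on a 10000 Mbps link with maxCfg = 25, SI mode yields
 * (10000 * 25) / 100 = 2500 Mbps, while SD mode caps the speed at
 * vn_max_rate = 25 * 100 = 2500 Mbps.
 */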
865
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000866/**
867 * bnx2x_fill_report_data - fill link report data to report
868 *
869 * @bp: driver handle
870 * @data: link state to update
871 *
872 * It uses non-atomic bit operations because it is called under the mutex.
873 */
874static inline void bnx2x_fill_report_data(struct bnx2x *bp,
875 struct bnx2x_link_report_data *data)
876{
877 u16 line_speed = bnx2x_get_mf_speed(bp);
878
879 memset(data, 0, sizeof(*data));
880
881 /* Fill the report data: effective line speed */
882 data->line_speed = line_speed;
883
884 /* Link is down */
885 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
886 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
887 &data->link_report_flags);
888
889 /* Full DUPLEX */
890 if (bp->link_vars.duplex == DUPLEX_FULL)
891 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
892
893 /* Rx Flow Control is ON */
894 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
895 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
896
897 /* Tx Flow Control is ON */
898 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
899 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
900}
901
902/**
903 * bnx2x_link_report - report link status to OS.
904 *
905 * @bp: driver handle
906 *
907 * Calls the __bnx2x_link_report() under the same locking scheme
908 * as a link/PHY state managing code to ensure a consistent link
909 * reporting.
910 */
911
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000912void bnx2x_link_report(struct bnx2x *bp)
913{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000914 bnx2x_acquire_phy_lock(bp);
915 __bnx2x_link_report(bp);
916 bnx2x_release_phy_lock(bp);
917}
918
919/**
920 * __bnx2x_link_report - report link status to OS.
921 *
922 * @bp: driver handle
923 *
924 * Non-atomic implementation.
925 * Should be called under the phy_lock.
926 */
927void __bnx2x_link_report(struct bnx2x *bp)
928{
929 struct bnx2x_link_report_data cur_data;
930
931 /* reread mf_cfg */
932 if (!CHIP_IS_E1(bp))
933 bnx2x_read_mf_cfg(bp);
934
935 /* Read the current link report info */
936 bnx2x_fill_report_data(bp, &cur_data);
937
938 /* Don't report link down or exactly the same link status twice */
939 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
940 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
941 &bp->last_reported_link.link_report_flags) &&
942 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
943 &cur_data.link_report_flags)))
944 return;
945
946 bp->link_cnt++;
947
948 /* We are going to report new link parameters now -
949 * remember the current data for next time.
950 */
951 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
952
953 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
954 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000955 netif_carrier_off(bp->dev);
956 netdev_err(bp->dev, "NIC Link is Down\n");
957 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000958 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000959 const char *duplex;
960 const char *flow;
961
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000962 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000963
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000964 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
965 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000966 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000967 else
Joe Perches94f05b02011-08-14 12:16:20 +0000968 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000969
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000970 /* Handle the FC at the end so that only these flags could
 971 * possibly be set. This way we may easily check if there is no FC
972 * enabled.
973 */
974 if (cur_data.link_report_flags) {
975 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
976 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000977 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
978 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000979 flow = "ON - receive & transmit";
980 else
981 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000982 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000983 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000984 }
Joe Perches94f05b02011-08-14 12:16:20 +0000985 } else {
986 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000987 }
Joe Perches94f05b02011-08-14 12:16:20 +0000988 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
989 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000990 }
991}
992
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000993void bnx2x_init_rx_rings(struct bnx2x *bp)
994{
995 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000996 u16 ring_prod;
997 int i, j;
998
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000999 /* Allocate TPA resources */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001000 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001001 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001002
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001003 DP(NETIF_MSG_IFUP,
1004 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1005
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001006 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001007 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001008 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001009 struct bnx2x_agg_info *tpa_info =
1010 &fp->tpa_info[i];
1011 struct sw_rx_bd *first_buf =
1012 &tpa_info->first_buf;
1013
1014 first_buf->skb = netdev_alloc_skb(bp->dev,
1015 fp->rx_buf_size);
1016 if (!first_buf->skb) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001017 BNX2X_ERR("Failed to allocate TPA "
1018 "skb pool for queue[%d] - "
1019 "disabling TPA on this "
1020 "queue!\n", j);
1021 bnx2x_free_tpa_pool(bp, fp, i);
1022 fp->disable_tpa = 1;
1023 break;
1024 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001025 dma_unmap_addr_set(first_buf, mapping, 0);
1026 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001027 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001028
1029 /* "next page" elements initialization */
1030 bnx2x_set_next_page_sgl(fp);
1031
1032 /* set SGEs bit mask */
1033 bnx2x_init_sge_ring_bit_mask(fp);
1034
1035 /* Allocate SGEs and initialize the ring elements */
1036 for (i = 0, ring_prod = 0;
1037 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1038
1039 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1040 BNX2X_ERR("was only able to allocate "
1041 "%d rx sges\n", i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001042 BNX2X_ERR("disabling TPA for "
1043 "queue[%d]\n", j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001044 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001045 bnx2x_free_rx_sge_range(bp, fp,
1046 ring_prod);
1047 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001048 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001049 fp->disable_tpa = 1;
1050 ring_prod = 0;
1051 break;
1052 }
1053 ring_prod = NEXT_SGE_IDX(ring_prod);
1054 }
1055
1056 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001057 }
1058 }
1059
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001060 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001061 struct bnx2x_fastpath *fp = &bp->fp[j];
1062
1063 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001064
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001065 /* Activate BD ring */
1066 /* Warning!
1067 * this will generate an interrupt (to the TSTORM)
1068 * must only be done after chip is initialized
1069 */
1070 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1071 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001072
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001073 if (j != 0)
1074 continue;
1075
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001076 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001077 REG_WR(bp, BAR_USTRORM_INTMEM +
1078 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1079 U64_LO(fp->rx_comp_mapping));
1080 REG_WR(bp, BAR_USTRORM_INTMEM +
1081 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1082 U64_HI(fp->rx_comp_mapping));
1083 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001084 }
1085}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001086
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001087static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1088{
1089 int i;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001090 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001091
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001092 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001093 struct bnx2x_fastpath *fp = &bp->fp[i];
Ariel Elior6383c0b2011-07-14 08:31:57 +00001094 for_each_cos_in_tx_queue(fp, cos) {
1095 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001096
Ariel Elior6383c0b2011-07-14 08:31:57 +00001097 u16 bd_cons = txdata->tx_bd_cons;
1098 u16 sw_prod = txdata->tx_pkt_prod;
1099 u16 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001100
Ariel Elior6383c0b2011-07-14 08:31:57 +00001101 while (sw_cons != sw_prod) {
1102 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1103 TX_BD(sw_cons));
1104 sw_cons++;
1105 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001106 }
1107 }
1108}
1109
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001110static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1111{
1112 struct bnx2x *bp = fp->bp;
1113 int i;
1114
1115 /* ring wasn't allocated */
1116 if (fp->rx_buf_ring == NULL)
1117 return;
1118
1119 for (i = 0; i < NUM_RX_BD; i++) {
1120 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1121 struct sk_buff *skb = rx_buf->skb;
1122
1123 if (skb == NULL)
1124 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001125 dma_unmap_single(&bp->pdev->dev,
1126 dma_unmap_addr(rx_buf, mapping),
1127 fp->rx_buf_size, DMA_FROM_DEVICE);
1128
1129 rx_buf->skb = NULL;
1130 dev_kfree_skb(skb);
1131 }
1132}
1133
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001134static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1135{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001136 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001137
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001138 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001139 struct bnx2x_fastpath *fp = &bp->fp[j];
1140
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001141 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001142
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001143 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001144 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001145 }
1146}
1147
1148void bnx2x_free_skbs(struct bnx2x *bp)
1149{
1150 bnx2x_free_tx_skbs(bp);
1151 bnx2x_free_rx_skbs(bp);
1152}
1153
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001154void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1155{
1156 /* load old values */
1157 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1158
1159 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1160 /* leave all but MAX value */
1161 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1162
1163 /* set new MAX value */
1164 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1165 & FUNC_MF_CFG_MAX_BW_MASK;
1166
1167 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1168 }
1169}
1170
Dmitry Kravkovca924292011-06-14 01:33:08 +00001171/**
1172 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1173 *
1174 * @bp: driver handle
1175 * @nvecs: number of vectors to be released
1176 */
1177static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001178{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001179 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001180
Dmitry Kravkovca924292011-06-14 01:33:08 +00001181 if (nvecs == offset)
1182 return;
1183 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001184 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001185 bp->msix_table[offset].vector);
1186 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001187#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001188 if (nvecs == offset)
1189 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001190 offset++;
1191#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001192
Dmitry Kravkovca924292011-06-14 01:33:08 +00001193 for_each_eth_queue(bp, i) {
1194 if (nvecs == offset)
1195 return;
1196 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1197 "irq\n", i, bp->msix_table[offset].vector);
1198
1199 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001200 }
1201}
1202
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001203void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001204{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001205 if (bp->flags & USING_MSIX_FLAG)
Dmitry Kravkovca924292011-06-14 01:33:08 +00001206 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001207 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001208 else if (bp->flags & USING_MSI_FLAG)
1209 free_irq(bp->pdev->irq, bp->dev);
1210 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001211 free_irq(bp->pdev->irq, bp->dev);
1212}
1213
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001214int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001215{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001216 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001217
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001218 bp->msix_table[msix_vec].entry = msix_vec;
1219 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1220 bp->msix_table[0].entry);
1221 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001222
1223#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001224 bp->msix_table[msix_vec].entry = msix_vec;
1225 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1226 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1227 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001228#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001229 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001230 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001231 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001232 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001233 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1234 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001235 }
1236
Ariel Elior6383c0b2011-07-14 08:31:57 +00001237 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001238
1239 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001240
1241 /*
1242 * reconfigure number of tx/rx queues according to available
1243 * MSI-X vectors
1244 */
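 /* For instance (illustrative numbers): if 10 vectors were requested
  * (8 ETH queues + CNIC + slowpath) but only 7 could be allocated,
  * diff = 3, the request is retried with 7 vectors and bp->num_queues
  * is reduced by 3.
  */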
1245 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001246 /* how many fewer vectors will we have? */
1247 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001248
1249 DP(NETIF_MSG_IFUP,
1250 "Trying to use less MSI-X vectors: %d\n", rc);
1251
1252 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1253
1254 if (rc) {
1255 DP(NETIF_MSG_IFUP,
1256 "MSI-X is not attainable rc %d\n", rc);
1257 return rc;
1258 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001259 /*
1260 * decrease number of queues by number of unallocated entries
1261 */
1262 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001263
1264 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1265 bp->num_queues);
1266 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001267 /* fall back to INTx if not enough memory */
1268 if (rc == -ENOMEM)
1269 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001270 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1271 return rc;
1272 }
1273
1274 bp->flags |= USING_MSIX_FLAG;
1275
1276 return 0;
1277}
1278
1279static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1280{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001281 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001282
Dmitry Kravkovca924292011-06-14 01:33:08 +00001283 rc = request_irq(bp->msix_table[offset++].vector,
1284 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001285 bp->dev->name, bp->dev);
1286 if (rc) {
1287 BNX2X_ERR("request sp irq failed\n");
1288 return -EBUSY;
1289 }
1290
1291#ifdef BCM_CNIC
1292 offset++;
1293#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001294 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001295 struct bnx2x_fastpath *fp = &bp->fp[i];
1296 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1297 bp->dev->name, i);
1298
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001299 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001300 bnx2x_msix_fp_int, 0, fp->name, fp);
1301 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001302 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1303 bp->msix_table[offset].vector, rc);
1304 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001305 return -EBUSY;
1306 }
1307
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001308 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001309 }
1310
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001311 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001312 offset = 1 + CNIC_PRESENT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001313 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1314 " ... fp[%d] %d\n",
1315 bp->msix_table[0].vector,
1316 0, bp->msix_table[offset].vector,
1317 i - 1, bp->msix_table[offset + i - 1].vector);
1318
1319 return 0;
1320}
1321
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001322int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001323{
1324 int rc;
1325
1326 rc = pci_enable_msi(bp->pdev);
1327 if (rc) {
1328 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1329 return -1;
1330 }
1331 bp->flags |= USING_MSI_FLAG;
1332
1333 return 0;
1334}
1335
1336static int bnx2x_req_irq(struct bnx2x *bp)
1337{
1338 unsigned long flags;
1339 int rc;
1340
1341 if (bp->flags & USING_MSI_FLAG)
1342 flags = 0;
1343 else
1344 flags = IRQF_SHARED;
1345
1346 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1347 bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001348 return rc;
1349}
1350
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001351static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1352{
1353 int rc = 0;
1354 if (bp->flags & USING_MSIX_FLAG) {
1355 rc = bnx2x_req_msix_irqs(bp);
1356 if (rc)
1357 return rc;
1358 } else {
1359 bnx2x_ack_int(bp);
1360 rc = bnx2x_req_irq(bp);
1361 if (rc) {
1362 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1363 return rc;
1364 }
1365 if (bp->flags & USING_MSI_FLAG) {
1366 bp->dev->irq = bp->pdev->irq;
1367 netdev_info(bp->dev, "using MSI IRQ %d\n",
1368 bp->pdev->irq);
1369 }
1370 }
1371
1372 return 0;
1373}
1374
1375static inline void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001376{
1377 int i;
1378
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001379 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001380 napi_enable(&bnx2x_fp(bp, i, napi));
1381}
1382
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001383static inline void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001384{
1385 int i;
1386
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001387 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001388 napi_disable(&bnx2x_fp(bp, i, napi));
1389}
1390
1391void bnx2x_netif_start(struct bnx2x *bp)
1392{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001393 if (netif_running(bp->dev)) {
1394 bnx2x_napi_enable(bp);
1395 bnx2x_int_enable(bp);
1396 if (bp->state == BNX2X_STATE_OPEN)
1397 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001398 }
1399}
1400
1401void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1402{
1403 bnx2x_int_disable_sync(bp, disable_hw);
1404 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001405}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001406
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001407u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1408{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001409 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001410
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001411#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001412 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001413 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1414 u16 ether_type = ntohs(hdr->h_proto);
1415
1416 /* Skip VLAN tag if present */
1417 if (ether_type == ETH_P_8021Q) {
1418 struct vlan_ethhdr *vhdr =
1419 (struct vlan_ethhdr *)skb->data;
1420
1421 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1422 }
1423
1424 /* If ethertype is FCoE or FIP - use FCoE ring */
1425 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001426 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001427 }
1428#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001429 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001430 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001431}
1432
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001433void bnx2x_set_num_queues(struct bnx2x *bp)
1434{
1435 switch (bp->multi_mode) {
1436 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001437 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001438 break;
1439 case ETH_RSS_MODE_REGULAR:
1440 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001441 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001442
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001443 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001444 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001445 break;
1446 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001447
1448 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001449 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001450}
1451
David S. Miller823dcd22011-08-20 10:39:12 -07001452/**
1453 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1454 *
1455 * @bp: Driver handle
1456 *
1457 * We currently support at most 16 Tx queues for each CoS, thus we will
1458 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1459 * bp->max_cos.
1460 *
1461 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1462 * index after all ETH L2 indices.
1463 *
1464 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1465 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1466 * 16..31,...) with indices that are not coupled with any real Tx queue.
1467 *
1468 * The proper configuration of skb->queue_mapping is handled by
1469 * bnx2x_select_queue() and __skb_tx_hash().
1470 *
1471 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1472 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1473 */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001474static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1475{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001476 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001477
Ariel Elior6383c0b2011-07-14 08:31:57 +00001478 tx = MAX_TXQS_PER_COS * bp->max_cos;
1479 rx = BNX2X_NUM_ETH_QUEUES(bp);
1480
1481/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001482#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001483 if (!NO_FCOE(bp)) {
1484 rx += FCOE_PRESENT;
1485 tx += FCOE_PRESENT;
1486 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001487#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001488
1489 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1490 if (rc) {
1491 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1492 return rc;
1493 }
1494 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1495 if (rc) {
1496 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1497 return rc;
1498 }
1499
1500 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1501 tx, rx);
1502
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001503 return rc;
1504}
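/*
 * Worked example (illustrative only), assuming MAX_TXQS_PER_COS == 16 as
 * the comment above implies, bp->max_cos == 3 and 6 ETH L2 queues:
 *
 *	tx = 16 * 3 = 48, rx = 6
 *	real Tx indices in use: 0..5, 16..21, 32..37; the rest are holes
 *	with FCoE present: tx = 49, rx = 7, and the FCoE Tx index is 48
 */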
1505
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001506static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1507{
1508 int i;
1509
1510 for_each_queue(bp, i) {
1511 struct bnx2x_fastpath *fp = &bp->fp[i];
1512
1513 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1514 if (IS_FCOE_IDX(i))
1515 /*
1516 * Although no IP frames are expected to arrive on
1517 * this ring, we still want to add an
1518 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1519 * overrun attack.
1520 */
1521 fp->rx_buf_size =
1522 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001523 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001524 else
1525 fp->rx_buf_size =
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001526 bp->dev->mtu + ETH_OVREHEAD +
1527 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001528 }
1529}
1530
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001531static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1532{
1533 int i;
1534 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1535 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1536
1537 /*
1538 * Prepare the initial contents of the indirection table if RSS is
1539 * enabled
1540 */
1541 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1542 for (i = 0; i < sizeof(ind_table); i++)
1543 ind_table[i] =
1544 bp->fp->cl_id + (i % num_eth_queues);
1545 }
1546
1547 /*
1548 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1549 * per-port, so if explicit configuration is needed, do it only
1550 * for a PMF.
1551 *
1552 * For 57712 and newer on the other hand it's a per-function
1553 * configuration.
1554 */
1555 return bnx2x_config_rss_pf(bp, ind_table,
1556 bp->port.pmf || !CHIP_IS_E1x(bp));
1557}
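/*
 * Illustrative sketch of the indirection table fill above (not driver
 * code), assuming 4 ETH queues with consecutive client IDs starting at
 * bp->fp->cl_id == 17, as the loop above relies on:
 *
 *	ind_table[] = { 17, 18, 19, 20, 17, 18, 19, 20, ... }
 *
 * i.e. the T_ETH_INDIRECTION_TABLE_SIZE entries cycle over the ETH
 * queues' client IDs, spreading Rx flows across all of them.
 */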
1558
1559int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1560{
1561 struct bnx2x_config_rss_params params = {0};
1562 int i;
1563
1564 /* Although RSS is meaningless when there is a single HW queue, we
1565 * still need it enabled in order to have HW Rx hash generated.
1566 *
1567 * if (!is_eth_multi(bp))
1568 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1569 */
1570
1571 params.rss_obj = &bp->rss_conf_obj;
1572
1573 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1574
1575 /* RSS mode */
1576 switch (bp->multi_mode) {
1577 case ETH_RSS_MODE_DISABLED:
1578 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1579 break;
1580 case ETH_RSS_MODE_REGULAR:
1581 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1582 break;
1583 case ETH_RSS_MODE_VLAN_PRI:
1584 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1585 break;
1586 case ETH_RSS_MODE_E1HOV_PRI:
1587 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1588 break;
1589 case ETH_RSS_MODE_IP_DSCP:
1590 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1591 break;
1592 default:
1593 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1594 return -EINVAL;
1595 }
1596
1597 /* If RSS is enabled */
1598 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1599 /* RSS configuration */
1600 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1601 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1602 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1603 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1604
1605 /* Hash bits */
1606 params.rss_result_mask = MULTI_MASK;
1607
1608 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1609
1610 if (config_hash) {
1611 /* RSS keys */
1612 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1613 params.rss_key[i] = random32();
1614
1615 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1616 }
1617 }
1618
1619 return bnx2x_config_rss(bp, &params);
1620}
1621
1622static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1623{
1624 struct bnx2x_func_state_params func_params = {0};
1625
1626 /* Prepare parameters for function state transitions */
1627 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1628
1629 func_params.f_obj = &bp->func_obj;
1630 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1631
1632 func_params.params.hw_init.load_phase = load_code;
1633
1634 return bnx2x_func_state_change(bp, &func_params);
1635}
1636
1637/*
1638 * Cleans the objects that have internal lists, without sending
1639 * ramrods. Should be run when interrupts are disabled.
1640 */
1641static void bnx2x_squeeze_objects(struct bnx2x *bp)
1642{
1643 int rc;
1644 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1645 struct bnx2x_mcast_ramrod_params rparam = {0};
1646 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1647
1648 /***************** Cleanup MACs' object first *************************/
1649
1650 /* Wait for completion of the requested commands */
1651 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1652 /* Perform a dry cleanup */
1653 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1654
1655 /* Clean ETH primary MAC */
1656 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1657 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1658 &ramrod_flags);
1659 if (rc != 0)
1660 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1661
1662 /* Cleanup UC list */
1663 vlan_mac_flags = 0;
1664 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1665 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1666 &ramrod_flags);
1667 if (rc != 0)
1668 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1669
1670 /***************** Now clean mcast object *****************************/
1671 rparam.mcast_obj = &bp->mcast_obj;
1672 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1673
1674 /* Add a DEL command... */
1675 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1676 if (rc < 0)
1677 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1678 "object: %d\n", rc);
1679
1680 /* ...and wait until all pending commands are cleared */
1681 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1682 while (rc != 0) {
1683 if (rc < 0) {
1684 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1685 rc);
1686 return;
1687 }
1688
1689 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1690 }
1691}
1692
1693#ifndef BNX2X_STOP_ON_ERROR
1694#define LOAD_ERROR_EXIT(bp, label) \
1695 do { \
1696 (bp)->state = BNX2X_STATE_ERROR; \
1697 goto label; \
1698 } while (0)
1699#else
1700#define LOAD_ERROR_EXIT(bp, label) \
1701 do { \
1702 (bp)->state = BNX2X_STATE_ERROR; \
1703 (bp)->panic = 1; \
1704 return -EBUSY; \
1705 } while (0)
1706#endif
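/*
 * Typical usage of the macro (taken from bnx2x_nic_load() below):
 *
 *	rc = bnx2x_set_real_num_queues(bp);
 *	if (rc) {
 *		BNX2X_ERR("Unable to set real_num_queues\n");
 *		LOAD_ERROR_EXIT(bp, load_error0);
 *	}
 *
 * With BNX2X_STOP_ON_ERROR defined it sets bp->panic and returns -EBUSY
 * instead of jumping to the unwind label.
 */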
1707
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001708/* must be called with rtnl_lock */
1709int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1710{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001711 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001712 u32 load_code;
1713 int i, rc;
1714
1715#ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp->panic))
1717 return -EPERM;
1718#endif
1719
1720 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1721
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001722 /* Set the initial link reported state to link down */
1723 bnx2x_acquire_phy_lock(bp);
1724 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1725 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1726 &bp->last_reported_link.link_report_flags);
1727 bnx2x_release_phy_lock(bp);
1728
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001729 /* must be called before memory allocation and HW init */
1730 bnx2x_ilt_set_info(bp);
1731
Ariel Elior6383c0b2011-07-14 08:31:57 +00001732 /*
1733 * Zero fastpath structures while preserving the invariants: the napi
1734 * objects (allocated only once), the fp index, max_cos and the bp pointer.
1735 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001736 */
1737 for_each_queue(bp, i)
1738 bnx2x_bz_fp(bp, i);
1739
Ariel Elior6383c0b2011-07-14 08:31:57 +00001740
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001741 /* Set the receive queues buffer size */
1742 bnx2x_set_rx_buf_size(bp);
1743
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001744 if (bnx2x_alloc_mem(bp))
1745 return -ENOMEM;
1746
1747 /* Since bnx2x_alloc_mem() may possibly update
1748 * bp->num_queues, bnx2x_set_real_num_queues() must always
1749 * come after it.
1750 */
1751 rc = bnx2x_set_real_num_queues(bp);
1752 if (rc) {
1753 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001754 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001755 }
1756
Ariel Elior6383c0b2011-07-14 08:31:57 +00001757 /* Configure multi-CoS mappings in the kernel.
1758 * This configuration may be overridden by a multi-class queue discipline
1759 * or by a DCBX negotiation result.
1760 */
1761 bnx2x_setup_tc(bp->dev, bp->max_cos);
1762
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001763 bnx2x_napi_enable(bp);
1764
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001765 /* Send the LOAD_REQUEST command to the MCP.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001766 * Returns the type of LOAD command: if this is the first port to be
1767 * initialized, the common blocks should be initialized as well,
1768 * otherwise - not.
1769 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001770 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001771 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001772 if (!load_code) {
1773 BNX2X_ERR("MCP response failure, aborting\n");
1774 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001775 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001776 }
1777 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1778 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001779 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001780 }
1781
1782 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001783 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001784
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001785 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1786 path, load_count[path][0], load_count[path][1],
1787 load_count[path][2]);
1788 load_count[path][0]++;
1789 load_count[path][1 + port]++;
1790 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1791 path, load_count[path][0], load_count[path][1],
1792 load_count[path][2]);
1793 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001794 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001795 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001796 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1797 else
1798 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1799 }
1800
1801 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001802 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001803 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001804 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001805 /*
1806 * We need the barrier to ensure the ordering between the
1807 * writing to bp->port.pmf here and reading it from the
1808 * bnx2x_periodic_task().
1809 */
1810 smp_mb();
1811 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1812 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001813 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001814
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001815 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1816
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001817 /* Init Function state controlling object */
1818 bnx2x__init_func_obj(bp);
1819
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001820 /* Initialize HW */
1821 rc = bnx2x_init_hw(bp, load_code);
1822 if (rc) {
1823 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001824 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001825 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001826 }
1827
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001828 /* Connect to IRQs */
1829 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001830 if (rc) {
1831 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001832 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001833 }
1834
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001835 /* Setup NIC internals and enable interrupts */
1836 bnx2x_nic_init(bp, load_code);
1837
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001838 /* Init per-function objects */
1839 bnx2x_init_bp_objs(bp);
1840
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001841 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1842 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001843 (bp->common.shmem2_base)) {
1844 if (SHMEM2_HAS(bp, dcc_support))
1845 SHMEM2_WR(bp, dcc_support,
1846 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1847 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1848 }
1849
1850 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1851 rc = bnx2x_func_start(bp);
1852 if (rc) {
1853 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00001854 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001855 LOAD_ERROR_EXIT(bp, load_error3);
1856 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001857
1858 /* Send LOAD_DONE command to MCP */
1859 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001860 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001861 if (!load_code) {
1862 BNX2X_ERR("MCP response failure, aborting\n");
1863 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001864 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001865 }
1866 }
1867
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001868 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001869 if (rc) {
1870 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001871 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001872 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001873
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001874#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001875 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001876 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001877#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001878
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001879 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001880 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001881 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001882 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001883 }
1884
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001885 rc = bnx2x_init_rss_pf(bp);
1886 if (rc)
1887 LOAD_ERROR_EXIT(bp, load_error4);
1888
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001889 /* Now that the Clients are configured we are ready to work */
1890 bp->state = BNX2X_STATE_OPEN;
1891
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001892 /* Configure a ucast MAC */
1893 rc = bnx2x_set_eth_mac(bp, true);
1894 if (rc)
1895 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001896
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001897 if (bp->pending_max) {
1898 bnx2x_update_max_mf_config(bp, bp->pending_max);
1899 bp->pending_max = 0;
1900 }
1901
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001902 if (bp->port.pmf)
1903 bnx2x_initial_phy_init(bp, load_mode);
1904
1905 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001906
1907 /* Initialize Rx filter. */
1908 netif_addr_lock_bh(bp->dev);
1909 bnx2x_set_rx_mode(bp->dev);
1910 netif_addr_unlock_bh(bp->dev);
1911
1912 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001913 switch (load_mode) {
1914 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001915 /* Tx queues should only be re-enabled */
1916 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001917 break;
1918
1919 case LOAD_OPEN:
1920 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001921 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001922 break;
1923
1924 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001925 bp->state = BNX2X_STATE_DIAG;
1926 break;
1927
1928 default:
1929 break;
1930 }
1931
1932 if (!bp->port.pmf)
1933 bnx2x__link_status_update(bp);
1934
1935 /* start the timer */
1936 mod_timer(&bp->timer, jiffies + bp->current_interval);
1937
1938#ifdef BCM_CNIC
1939 bnx2x_setup_cnic_irq_info(bp);
1940 if (bp->state == BNX2X_STATE_OPEN)
1941 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1942#endif
1943 bnx2x_inc_load_cnt(bp);
1944
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001945 /* Wait for all pending SP commands to complete */
1946 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1947 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1948 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1949 return -EBUSY;
1950 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001951
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001952 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001953 return 0;
1954
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001955#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001956load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001957#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001958 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001959 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001960#endif
1961load_error3:
1962 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001963
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001964 /* Clean queueable objects */
1965 bnx2x_squeeze_objects(bp);
1966
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001967 /* Free SKBs, SGEs, TPA pool and driver internals */
1968 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001969 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001970 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001971
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001972 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001973 bnx2x_free_irq(bp);
1974load_error2:
1975 if (!BP_NOMCP(bp)) {
1976 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1977 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1978 }
1979
1980 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001981load_error1:
1982 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001983load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001984 bnx2x_free_mem(bp);
1985
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001986 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001987#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001988}
1989
1990/* must be called with rtnl_lock */
1991int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1992{
1993 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001994 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001995
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001996 if ((bp->state == BNX2X_STATE_CLOSED) ||
1997 (bp->state == BNX2X_STATE_ERROR)) {
1998 /* We can get here if the driver has been unloaded
1999 * during parity error recovery and is either waiting for a
2000 * leader to complete or for other functions to unload and
2001 * then ifdown has been issued. In this case we want to
2002 * unload and let other functions complete the recovery
2003 * process.
2004 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002005 bp->recovery_state = BNX2X_RECOVERY_DONE;
2006 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002007 bnx2x_release_leader_lock(bp);
2008 smp_mb();
2009
2010 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002011
2012 return -EINVAL;
2013 }
2014
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002015 /*
2016 * It's important to set the bp->state to the value different from
2017 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2018 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2019 */
2020 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2021 smp_mb();
2022
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002023 /* Stop Tx */
2024 bnx2x_tx_disable(bp);
2025
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002026#ifdef BCM_CNIC
2027 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2028#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002029
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002030 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002031
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002032 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002033
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002034 /* Set ALWAYS_ALIVE bit in shmem */
2035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2036
2037 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002038
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002039 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002040
2041 /* Cleanup the chip if needed */
2042 if (unload_mode != UNLOAD_RECOVERY)
2043 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002044 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002045 /* Send the UNLOAD_REQUEST to the MCP */
2046 bnx2x_send_unload_req(bp, unload_mode);
2047
2048 /*
2049 * Prevent transactions to the host from the functions on the
2050 * engine that doesn't reset global blocks in case of global
2051 * attention once global blocks are reset and gates are opened
2052 * (the engine whose leader will perform the recovery
2053 * last).
2054 */
2055 if (!CHIP_IS_E1x(bp))
2056 bnx2x_pf_disable(bp);
2057
2058 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002059 bnx2x_netif_stop(bp, 1);
2060
2061 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002062 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002063
2064 /* Report UNLOAD_DONE to MCP */
2065 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002066 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002067
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002068 /*
2069 * At this stage no more interrupts will arrive, so we may safely clean
2070 * the queueable objects here in case they failed to get cleaned so far.
2071 */
2072 bnx2x_squeeze_objects(bp);
2073
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002074 /* There should be no more pending SP commands at this stage */
2075 bp->sp_state = 0;
2076
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002077 bp->port.pmf = 0;
2078
2079 /* Free SKBs, SGEs, TPA pool and driver internals */
2080 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002081 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002082 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002083
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002084 bnx2x_free_mem(bp);
2085
2086 bp->state = BNX2X_STATE_CLOSED;
2087
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002088 /* Check if there are pending parity attentions. If there are - set
2089 * RECOVERY_IN_PROGRESS.
2090 */
2091 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2092 bnx2x_set_reset_in_progress(bp);
2093
2094 /* Set RESET_IS_GLOBAL if needed */
2095 if (global)
2096 bnx2x_set_reset_global(bp);
2097 }
2098
2099
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002100 /* The last driver must disable a "close the gate" if there is no
2101 * parity attention or "process kill" pending.
2102 */
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002103 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002104 bnx2x_disable_close_the_gate(bp);
2105
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002106 return 0;
2107}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002108
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002109int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2110{
2111 u16 pmcsr;
2112
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002113 /* If there is no power capability, silently succeed */
2114 if (!bp->pm_cap) {
2115 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2116 return 0;
2117 }
2118
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002119 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2120
2121 switch (state) {
2122 case PCI_D0:
2123 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2124 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2125 PCI_PM_CTRL_PME_STATUS));
2126
2127 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2128 /* delay required during transition out of D3hot */
2129 msleep(20);
2130 break;
2131
2132 case PCI_D3hot:
2133 /* If there are other clients above, don't
2134 shut down the power */
2135 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2136 return 0;
2137 /* Don't shut down the power for emulation and FPGA */
2138 if (CHIP_REV_IS_SLOW(bp))
2139 return 0;
2140
2141 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2142 pmcsr |= 3;
2143
2144 if (bp->wol)
2145 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2146
2147 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2148 pmcsr);
2149
2150 /* No more memory access after this point until
2151 * device is brought back to D0.
2152 */
2153 break;
2154
2155 default:
2156 return -EINVAL;
2157 }
2158 return 0;
2159}
2160
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002161/*
2162 * net_device service functions
2163 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002164int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002165{
2166 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002167 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002168 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2169 napi);
2170 struct bnx2x *bp = fp->bp;
2171
2172 while (1) {
2173#ifdef BNX2X_STOP_ON_ERROR
2174 if (unlikely(bp->panic)) {
2175 napi_complete(napi);
2176 return 0;
2177 }
2178#endif
2179
Ariel Elior6383c0b2011-07-14 08:31:57 +00002180 for_each_cos_in_tx_queue(fp, cos)
2181 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2182 bnx2x_tx_int(bp, &fp->txdata[cos]);
2183
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002184
2185 if (bnx2x_has_rx_work(fp)) {
2186 work_done += bnx2x_rx_int(fp, budget - work_done);
2187
2188 /* must not complete if we consumed full budget */
2189 if (work_done >= budget)
2190 break;
2191 }
2192
2193 /* Fall out from the NAPI loop if needed */
2194 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002195#ifdef BCM_CNIC
2196 /* No need to update SB for FCoE L2 ring as long as
2197 * it's connected to the default SB and the SB
2198 * has been updated when NAPI was scheduled.
2199 */
2200 if (IS_FCOE_FP(fp)) {
2201 napi_complete(napi);
2202 break;
2203 }
2204#endif
2205
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002206 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002207 /* bnx2x_has_rx_work() reads the status block,
2208 * thus we need to ensure that status block indices
2209 * have been actually read (bnx2x_update_fpsb_idx)
2210 * prior to this check (bnx2x_has_rx_work) so that
2211 * we won't write the "newer" value of the status block
2212 * to IGU (if there was a DMA right after
2213 * bnx2x_has_rx_work and if there is no rmb, the memory
2214 * reading (bnx2x_update_fpsb_idx) may be postponed
2215 * to right before bnx2x_ack_sb). In this case there
2216 * will never be another interrupt until there is
2217 * another update of the status block, while there
2218 * is still unhandled work.
2219 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002220 rmb();
2221
2222 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2223 napi_complete(napi);
2224 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002225 DP(NETIF_MSG_HW,
2226 "Update index to %d\n", fp->fp_hc_idx);
2227 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2228 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002229 IGU_INT_ENABLE, 1);
2230 break;
2231 }
2232 }
2233 }
2234
2235 return work_done;
2236}
2237
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002238/* we split the first BD into headers and data BDs
2239 * to ease the pain of our fellow microcode engineers
2240 * we use one mapping for both BDs
2241 * So far this has only been observed to happen
2242 * in Other Operating Systems(TM)
2243 */
2244static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002245 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002246 struct sw_tx_bd *tx_buf,
2247 struct eth_tx_start_bd **tx_bd, u16 hlen,
2248 u16 bd_prod, int nbd)
2249{
2250 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2251 struct eth_tx_bd *d_tx_bd;
2252 dma_addr_t mapping;
2253 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2254
2255 /* first fix first BD */
2256 h_tx_bd->nbd = cpu_to_le16(nbd);
2257 h_tx_bd->nbytes = cpu_to_le16(hlen);
2258
2259 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2260 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2261 h_tx_bd->addr_lo, h_tx_bd->nbd);
2262
2263 /* now get a new data BD
2264 * (after the pbd) and fill it */
2265 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002266 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002267
2268 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2269 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2270
2271 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2272 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2273 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2274
2275 /* this marks the BD as one that has no individual mapping */
2276 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2277
2278 DP(NETIF_MSG_TX_QUEUED,
2279 "TSO split data size is %d (%x:%x)\n",
2280 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2281
2282 /* update tx_bd */
2283 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2284
2285 return bd_prod;
2286}
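/*
 * Illustrative example (not driver code): for a TSO packet whose linear
 * part holds 54 bytes of headers followed by 200 bytes of data, the
 * start BD (nbytes == 254) is trimmed to the 54-byte header and a new
 * data BD is added at mapping + 54 for the remaining 200 bytes. Both BDs
 * share the single DMA mapping, which is why the buffer is flagged with
 * BNX2X_TSO_SPLIT_BD (no individual unmap for the data BD).
 */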
2287
2288static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2289{
2290 if (fix > 0)
2291 csum = (u16) ~csum_fold(csum_sub(csum,
2292 csum_partial(t_header - fix, fix, 0)));
2293
2294 else if (fix < 0)
2295 csum = (u16) ~csum_fold(csum_add(csum,
2296 csum_partial(t_header, -fix, 0)));
2297
2298 return swab16(csum);
2299}
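/*
 * Sketch of the intent, as far as the code above shows: when the stack
 * started its checksum 'fix' bytes before the transport header (fix > 0)
 * the partial sum of those extra bytes is subtracted out, and when it
 * started after it (fix < 0) the missing bytes are added in; the folded
 * result is byte-swapped into the order the parsing BD expects.
 */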
2300
2301static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2302{
2303 u32 rc;
2304
2305 if (skb->ip_summed != CHECKSUM_PARTIAL)
2306 rc = XMIT_PLAIN;
2307
2308 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002309 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002310 rc = XMIT_CSUM_V6;
2311 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2312 rc |= XMIT_CSUM_TCP;
2313
2314 } else {
2315 rc = XMIT_CSUM_V4;
2316 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2317 rc |= XMIT_CSUM_TCP;
2318 }
2319 }
2320
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002321 if (skb_is_gso_v6(skb))
2322 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2323 else if (skb_is_gso(skb))
2324 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002325
2326 return rc;
2327}
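/*
 * Example return values (derived from the checks above): a frame without
 * CHECKSUM_PARTIAL -> XMIT_PLAIN; an IPv4 TCP frame with checksum offload
 * -> XMIT_CSUM_V4 | XMIT_CSUM_TCP; the same frame with TSO ->
 * XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP.
 */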
2328
2329#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2330/* check if packet requires linearization (packet is too fragmented)
2331 no need to check fragmentation if page size > 8K (there will be no
2332 violation of FW restrictions) */
2333static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2334 u32 xmit_type)
2335{
2336 int to_copy = 0;
2337 int hlen = 0;
2338 int first_bd_sz = 0;
2339
2340 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2341 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2342
2343 if (xmit_type & XMIT_GSO) {
2344 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2345 /* Check if LSO packet needs to be copied:
2346 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2347 int wnd_size = MAX_FETCH_BD - 3;
2348 /* Number of windows to check */
2349 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2350 int wnd_idx = 0;
2351 int frag_idx = 0;
2352 u32 wnd_sum = 0;
2353
2354 /* Headers length */
2355 hlen = (int)(skb_transport_header(skb) - skb->data) +
2356 tcp_hdrlen(skb);
2357
2358 /* Amount of data (w/o headers) on linear part of SKB */
2359 first_bd_sz = skb_headlen(skb) - hlen;
2360
2361 wnd_sum = first_bd_sz;
2362
2363 /* Calculate the first sum - it's special */
2364 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2365 wnd_sum +=
2366 skb_shinfo(skb)->frags[frag_idx].size;
2367
2368 /* If there was data on linear skb data - check it */
2369 if (first_bd_sz > 0) {
2370 if (unlikely(wnd_sum < lso_mss)) {
2371 to_copy = 1;
2372 goto exit_lbl;
2373 }
2374
2375 wnd_sum -= first_bd_sz;
2376 }
2377
2378 /* Others are easier: run through the frag list and
2379 check all windows */
2380 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2381 wnd_sum +=
2382 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2383
2384 if (unlikely(wnd_sum < lso_mss)) {
2385 to_copy = 1;
2386 break;
2387 }
2388 wnd_sum -=
2389 skb_shinfo(skb)->frags[wnd_idx].size;
2390 }
2391 } else {
2392 /* in the non-LSO case a too-fragmented packet should
2393 always be linearized */
2394 to_copy = 1;
2395 }
2396 }
2397
2398exit_lbl:
2399 if (unlikely(to_copy))
2400 DP(NETIF_MSG_TX_QUEUED,
2401 "Linearization IS REQUIRED for %s packet. "
2402 "num_frags %d hlen %d first_bd_sz %d\n",
2403 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2404 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2405
2406 return to_copy;
2407}
2408#endif
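/*
 * Worked example (illustrative, assuming MAX_FETCH_BD == 13 so the window
 * size is 10): a TSO skb with 10 or more frags (MAX_FETCH_BD - 3) is
 * checked with a sliding window of 10 BDs (linear data plus frags); if
 * any such window carries less than gso_size bytes the FW restriction
 * would be violated, so the skb is linearized (copied) instead.
 */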
2409
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002410static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2411 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002412{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002413 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2414 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2415 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002416 if ((xmit_type & XMIT_GSO_V6) &&
2417 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002418 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002419}
2420
2421/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002422 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002423 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002424 * @skb: packet skb
2425 * @pbd: parse BD
2426 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002427 */
2428static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2429 struct eth_tx_parse_bd_e1x *pbd,
2430 u32 xmit_type)
2431{
2432 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2433 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2434 pbd->tcp_flags = pbd_tcp_flags(skb);
2435
2436 if (xmit_type & XMIT_GSO_V4) {
2437 pbd->ip_id = swab16(ip_hdr(skb)->id);
2438 pbd->tcp_pseudo_csum =
2439 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2440 ip_hdr(skb)->daddr,
2441 0, IPPROTO_TCP, 0));
2442
2443 } else
2444 pbd->tcp_pseudo_csum =
2445 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2446 &ipv6_hdr(skb)->daddr,
2447 0, IPPROTO_TCP, 0));
2448
2449 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2450}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002451
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002452/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002453 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002454 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002455 * @bp: driver handle
2456 * @skb: packet skb
2457 * @parsing_data: data to be updated
2458 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002459 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002460 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002461 */
2462static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002463 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002464{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002465 *parsing_data |=
2466 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2467 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2468 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002469
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002470 if (xmit_type & XMIT_CSUM_TCP) {
2471 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2472 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2473 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002474
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002475 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2476 } else
2477 /* We support checksum offload for TCP and UDP only.
2478 * No need to pass the UDP header length - it's a constant.
2479 */
2480 return skb_transport_header(skb) +
2481 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002482}
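/*
 * Worked example (illustrative): for an untagged IPv4/TCP frame the
 * transport header starts at byte 34, so the offset written into
 * parsing_data is 17 (in 16-bit words) and a 20-byte TCP header is
 * encoded as 5 (in dwords); the returned hlen is 54, the byte offset of
 * the TCP payload.
 */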
2483
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002484static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2485 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2486{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002487 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2488
2489 if (xmit_type & XMIT_CSUM_V4)
2490 tx_start_bd->bd_flags.as_bitfield |=
2491 ETH_TX_BD_FLAGS_IP_CSUM;
2492 else
2493 tx_start_bd->bd_flags.as_bitfield |=
2494 ETH_TX_BD_FLAGS_IPV6;
2495
2496 if (!(xmit_type & XMIT_CSUM_TCP))
2497 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002498}
2499
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002500/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002501 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002502 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002503 * @bp: driver handle
2504 * @skb: packet skb
2505 * @pbd: parse BD to be updated
2506 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002507 */
2508static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2509 struct eth_tx_parse_bd_e1x *pbd,
2510 u32 xmit_type)
2511{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002512 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002513
2514 /* for now NS flag is not used in Linux */
2515 pbd->global_data =
2516 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2517 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2518
2519 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002520 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002521
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002522 hlen += pbd->ip_hlen_w;
2523
2524 /* We support checksum offload for TCP and UDP only */
2525 if (xmit_type & XMIT_CSUM_TCP)
2526 hlen += tcp_hdrlen(skb) / 2;
2527 else
2528 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002529
2530 pbd->total_hlen_w = cpu_to_le16(hlen);
2531 hlen = hlen*2;
2532
2533 if (xmit_type & XMIT_CSUM_TCP) {
2534 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2535
2536 } else {
2537 s8 fix = SKB_CS_OFF(skb); /* signed! */
2538
2539 DP(NETIF_MSG_TX_QUEUED,
2540 "hlen %d fix %d csum before fix %x\n",
2541 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2542
2543 /* HW bug: fixup the CSUM */
2544 pbd->tcp_pseudo_csum =
2545 bnx2x_csum_fix(skb_transport_header(skb),
2546 SKB_CS(skb), fix);
2547
2548 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2549 pbd->tcp_pseudo_csum);
2550 }
2551
2552 return hlen;
2553}
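/*
 * Worked example (illustrative): for an untagged IPv4/TCP frame, hlen
 * starts at 7 (14-byte MAC header in 16-bit words), ip_hlen_w is 10
 * (20-byte IP header) and the TCP header adds another 10, so
 * total_hlen_w == 27 and the function returns 54 (bytes).
 */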
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002554
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002555/* called with netif_tx_lock
2556 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2557 * netif_wake_queue()
2558 */
2559netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2560{
2561 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002562
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002563 struct bnx2x_fastpath *fp;
2564 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002565 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002566 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002567 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002568 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002569 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002570 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002571 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002572 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002573 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002574 dma_addr_t mapping;
2575 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2576 int i;
2577 u8 hlen = 0;
2578 __le16 pkt_size = 0;
2579 struct ethhdr *eth;
2580 u8 mac_type = UNICAST_ADDRESS;
2581
2582#ifdef BNX2X_STOP_ON_ERROR
2583 if (unlikely(bp->panic))
2584 return NETDEV_TX_BUSY;
2585#endif
2586
Ariel Elior6383c0b2011-07-14 08:31:57 +00002587 txq_index = skb_get_queue_mapping(skb);
2588 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002589
Ariel Elior6383c0b2011-07-14 08:31:57 +00002590 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2591
2592 /* decode the fastpath index and the cos index from the txq */
2593 fp_index = TXQ_TO_FP(txq_index);
2594 txdata_index = TXQ_TO_COS(txq_index);
2595
2596#ifdef BCM_CNIC
2597 /*
2598 * Override the above for the FCoE queue:
2599 * - FCoE fp entry is right after the ETH entries.
2600 * - FCoE L2 queue uses bp->txdata[0] only.
2601 */
2602 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2603 bnx2x_fcoe_tx(bp, txq_index)))) {
2604 fp_index = FCOE_IDX;
2605 txdata_index = 0;
2606 }
2607#endif
2608
2609 /* enable this debug print to view the transmission queue being used
Joe Perches94f05b02011-08-14 12:16:20 +00002610 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002611 txq_index, fp_index, txdata_index); */
2612
2613 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002614 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002615 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002616
Ariel Elior6383c0b2011-07-14 08:31:57 +00002617 /* enable this debug print to view the transmission details
2618 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
Joe Perches94f05b02011-08-14 12:16:20 +00002619 " tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002620 txdata->cid, fp_index, txdata_index, txdata, fp); */
2621
2622 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2623 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002624 fp->eth_q_stats.driver_xoff++;
2625 netif_tx_stop_queue(txq);
2626 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2627 return NETDEV_TX_BUSY;
2628 }
2629
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002630 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2631 "protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002632 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002633 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2634
2635 eth = (struct ethhdr *)skb->data;
2636
2637 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2638 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2639 if (is_broadcast_ether_addr(eth->h_dest))
2640 mac_type = BROADCAST_ADDRESS;
2641 else
2642 mac_type = MULTICAST_ADDRESS;
2643 }
2644
2645#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2646 /* First, check if we need to linearize the skb (due to FW
2647 restrictions). No need to check fragmentation if page size > 8K
2648 (there will be no violation to FW restrictions) */
2649 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2650 /* Statistics of linearization */
2651 bp->lin_cnt++;
2652 if (skb_linearize(skb) != 0) {
2653 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2654 "silently dropping this SKB\n");
2655 dev_kfree_skb_any(skb);
2656 return NETDEV_TX_OK;
2657 }
2658 }
2659#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002660 /* Map skb linear data for DMA */
2661 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2662 skb_headlen(skb), DMA_TO_DEVICE);
2663 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2664 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2665 "silently dropping this SKB\n");
2666 dev_kfree_skb_any(skb);
2667 return NETDEV_TX_OK;
2668 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002669 /*
2670 Please read carefully. First we use one BD which we mark as start,
2671 then we have a parsing info BD (used for TSO or xsum),
2672 and only then we have the rest of the TSO BDs.
2673 (don't forget to mark the last one as last,
2674 and to unmap only AFTER you write to the BD ...)
2675 And above all, all PBD sizes are in words - NOT DWORDS!
2676 */
2677
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002678 /* get current pkt produced now - advance it just before sending packet
2679 * since mapping of pages may fail and cause packet to be dropped
2680 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002681 pkt_prod = txdata->tx_pkt_prod;
2682 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002683
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002684 /* get a tx_buf and first BD
2685 * tx_start_bd may be changed during SPLIT,
2686 * but first_bd will always stay first
2687 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002688 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2689 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002690 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002691
2692 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002693 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2694 mac_type);
2695
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002696 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002697 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002698
2699 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002700 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002701 tx_buf->skb = skb;
2702 tx_buf->flags = 0;
2703
2704 DP(NETIF_MSG_TX_QUEUED,
2705 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002706 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002707
Jesse Grosseab6d182010-10-20 13:56:03 +00002708 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002709 tx_start_bd->vlan_or_ethertype =
2710 cpu_to_le16(vlan_tx_tag_get(skb));
2711 tx_start_bd->bd_flags.as_bitfield |=
2712 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002713 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002714 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002715
2716 /* turn on parsing and get a BD */
2717 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002718
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002719 if (xmit_type & XMIT_CSUM)
2720 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002721
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002722 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002723 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002724 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2725 /* Set PBD in checksum offload case */
2726 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002727 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2728 &pbd_e2_parsing_data,
2729 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002730 if (IS_MF_SI(bp)) {
2731 /*
2732 * fill in the MAC addresses in the PBD - for local
2733 * switching
2734 */
2735 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2736 &pbd_e2->src_mac_addr_mid,
2737 &pbd_e2->src_mac_addr_lo,
2738 eth->h_source);
2739 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2740 &pbd_e2->dst_mac_addr_mid,
2741 &pbd_e2->dst_mac_addr_lo,
2742 eth->h_dest);
2743 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002744 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002745 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002746 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2747 /* Set PBD in checksum offload case */
2748 if (xmit_type & XMIT_CSUM)
2749 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002750
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002751 }
2752
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002753 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002754 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2755 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002756 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002757 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2758 pkt_size = tx_start_bd->nbytes;
2759
2760 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2761 " nbytes %d flags %x vlan %x\n",
2762 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2763 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002764 tx_start_bd->bd_flags.as_bitfield,
2765 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002766
2767 if (xmit_type & XMIT_GSO) {
2768
2769 DP(NETIF_MSG_TX_QUEUED,
2770 "TSO packet len %d hlen %d total len %d tso size %d\n",
2771 skb->len, hlen, skb_headlen(skb),
2772 skb_shinfo(skb)->gso_size);
2773
2774 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2775
2776 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00002777 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2778 &tx_start_bd, hlen,
2779 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002780 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002781 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2782 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002783 else
2784 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002785 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002786
2787 /* Set the PBD's parsing_data field if not zero
2788 * (for the chips newer than 57711).
2789 */
2790 if (pbd_e2_parsing_data)
2791 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2792
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002793 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2794
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002795 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002796 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2797 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2798
Ian Campbellf55c9572011-08-24 22:28:13 +00002799 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size,
2800 DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002801 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2802
2803 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2804 "dropping packet...\n");
2805
2806 /* we need unmap all buffers already mapped
2807 * for this SKB;
2808 * first_bd->nbd need to be properly updated
2809 * before call to bnx2x_free_tx_pkt
2810 */
2811 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002812 bnx2x_free_tx_pkt(bp, txdata,
2813 TX_BD(txdata->tx_pkt_prod));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002814 return NETDEV_TX_OK;
2815 }
2816
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002817 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002818 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002819 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00002820 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002821
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002822 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2823 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2824 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2825 le16_add_cpu(&pkt_size, frag->size);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002826 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002827
2828 DP(NETIF_MSG_TX_QUEUED,
2829 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2830 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2831 le16_to_cpu(tx_data_bd->nbytes));
2832 }
2833
2834 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2835
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002836 /* update with actual num BDs */
2837 first_bd->nbd = cpu_to_le16(nbd);
2838
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002839 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2840
2841 /* now send a tx doorbell, counting the next BD
2842 * if the packet contains or ends with it
2843 */
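	/* (the last BD of each ring page is reserved as a next-page pointer
	 * which the normal producer advance skips; if TX_BD_POFF() shows that
	 * the packet's BDs wrapped past a page boundary, that pointer BD lies
	 * within the packet's span and must be included in nbd for the FW)
	 */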
2844 if (TX_BD_POFF(bd_prod) < nbd)
2845 nbd++;
2846
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002847 /* total_pkt_bytes should be set on the first data BD if
2848 * it's not an LSO packet and there is more than one
2849 * data BD. In this case pkt_size is limited by an MTU value.
2850 * However we prefer to set it for an LSO packet (while we don't
2851 * have to) in order to save some CPU cycles in a non-LSO
2852 * case, where we care much more about them.
2853 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002854 if (total_pkt_bd != NULL)
2855 total_pkt_bd->total_pkt_bytes = pkt_size;
2856
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002857 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002858 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002859 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002860 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002861 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2862 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2863 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2864 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002865 if (pbd_e2)
2866 DP(NETIF_MSG_TX_QUEUED,
2867 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2868 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2869 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2870 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2871 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002872 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2873
Ariel Elior6383c0b2011-07-14 08:31:57 +00002874 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002875 /*
2876 * Make sure that the BD data is updated before updating the producer
2877 * since FW might read the BD right after the producer is updated.
2878 * This is only applicable for weak-ordered memory model archs such
2879 * as IA-64. The following barrier is also mandatory since the FW
2880 * assumes packets must have BDs.
2881 */
2882 wmb();
2883
Ariel Elior6383c0b2011-07-14 08:31:57 +00002884 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002885 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002886
Ariel Elior6383c0b2011-07-14 08:31:57 +00002887 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002888
2889 mmiowb();
2890
Ariel Elior6383c0b2011-07-14 08:31:57 +00002891 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002892
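	/* Stop the queue if there may not be room for another worst-case
	 * packet: up to MAX_SKB_FRAGS fragment BDs plus (roughly) a start BD,
	 * a parsing BD and a possible header-split BD.
	 */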
Ariel Elior6383c0b2011-07-14 08:31:57 +00002893 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002894 netif_tx_stop_queue(txq);
2895
2896 /* the paired memory barrier is in bnx2x_tx_int(); we have to keep
2897 * the ordering of set_bit() in netif_tx_stop_queue() and the read
2898 * of the tx consumer index (tx_bd_cons) */
2899 smp_mb();
2900
2901 fp->eth_q_stats.driver_xoff++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002902 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002903 netif_tx_wake_queue(txq);
2904 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002905 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002906
2907 return NETDEV_TX_OK;
2908}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002909
Ariel Elior6383c0b2011-07-14 08:31:57 +00002910/**
2911 * bnx2x_setup_tc - routine to configure net_device for multi tc
2912 *
2913 * @dev: net device to configure
2914 * @num_tc: number of traffic classes to enable
2915 *
2916 * callback connected to the ndo_setup_tc function pointer
2917 */
2918int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2919{
2920 int cos, prio, count, offset;
2921 struct bnx2x *bp = netdev_priv(dev);
2922
2923 /* setup tc must be called under rtnl lock */
2924 ASSERT_RTNL();
2925
2926 /* no traffic classes requested: reset the tc configuration */
2927 if (!num_tc) {
2928 netdev_reset_tc(dev);
2929 return 0;
2930 }
2931
2932 /* requested to support too many traffic classes */
2933 if (num_tc > bp->max_cos) {
2934 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
Joe Perches94f05b02011-08-14 12:16:20 +00002935 " requested: %d. max supported is %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002936 num_tc, bp->max_cos);
2937 return -EINVAL;
2938 }
2939
2940 /* declare the number of supported traffic classes */
2941 if (netdev_set_num_tc(dev, num_tc)) {
Joe Perches94f05b02011-08-14 12:16:20 +00002942 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002943 num_tc);
2944 return -EINVAL;
2945 }
2946
2947 /* configure priority to traffic class mapping */
2948 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2949 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Joe Perches94f05b02011-08-14 12:16:20 +00002950 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002951 prio, bp->prio_to_cos[prio]);
2952 }
2953
2954
2955 /* Use this configuration to diffrentiate tc0 from other COSes
2956 This can be used for ets or pfc, and save the effort of setting
2957 up a multio class queue disc or negotiating DCBX with a switch
2958 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00002959 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002960 for (prio = 1; prio < 16; prio++) {
2961 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00002962 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002963 } */
2964
2965 /* configure traffic class to transmission queue mapping */
2966 for (cos = 0; cos < bp->max_cos; cos++) {
2967 count = BNX2X_NUM_ETH_QUEUES(bp);
2968 offset = cos * MAX_TXQS_PER_COS;
2969 netdev_set_tc_queue(dev, cos, count, offset);
Joe Perches94f05b02011-08-14 12:16:20 +00002970 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002971 cos, offset, count);
2972 }
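	/* Illustration (hypothetical values): with 4 ETH queues and
	 * MAX_TXQS_PER_COS of 16, tc 0 maps to tx queues 0-3, tc 1 to
	 * queues 16-19 and tc 2 to queues 32-35, while prio_to_cos[]
	 * (set above) decides which tc each 802.1p priority uses.
	 */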
2973
2974 return 0;
2975}
2976
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002977/* called with rtnl_lock */
2978int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2979{
2980 struct sockaddr *addr = p;
2981 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002982 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002983
2984 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2985 return -EINVAL;
2986
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002987 if (netif_running(dev)) {
2988 rc = bnx2x_set_eth_mac(bp, false);
2989 if (rc)
2990 return rc;
2991 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002992
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002993 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2994
2995 if (netif_running(dev))
2996 rc = bnx2x_set_eth_mac(bp, true);
2997
2998 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002999}
3000
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003001static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3002{
3003 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3004 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003005 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003006
3007 /* Common */
3008#ifdef BCM_CNIC
3009 if (IS_FCOE_IDX(fp_index)) {
3010 memset(sb, 0, sizeof(union host_hc_status_block));
3011 fp->status_blk_mapping = 0;
3012
3013 } else {
3014#endif
3015 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003016 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003017 BNX2X_PCI_FREE(sb->e2_sb,
3018 bnx2x_fp(bp, fp_index,
3019 status_blk_mapping),
3020 sizeof(struct host_hc_status_block_e2));
3021 else
3022 BNX2X_PCI_FREE(sb->e1x_sb,
3023 bnx2x_fp(bp, fp_index,
3024 status_blk_mapping),
3025 sizeof(struct host_hc_status_block_e1x));
3026#ifdef BCM_CNIC
3027 }
3028#endif
3029 /* Rx */
3030 if (!skip_rx_queue(bp, fp_index)) {
3031 bnx2x_free_rx_bds(fp);
3032
3033 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3034 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3035 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3036 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3037 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3038
3039 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3040 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3041 sizeof(struct eth_fast_path_rx_cqe) *
3042 NUM_RCQ_BD);
3043
3044 /* SGE ring */
3045 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3046 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3047 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3048 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3049 }
3050
3051 /* Tx */
3052 if (!skip_tx_queue(bp, fp_index)) {
3053 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003054 for_each_cos_in_tx_queue(fp, cos) {
3055 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3056
3057 DP(BNX2X_MSG_SP,
Joe Perches94f05b02011-08-14 12:16:20 +00003058 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003059 fp_index, cos, txdata->cid);
3060
3061 BNX2X_FREE(txdata->tx_buf_ring);
3062 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3063 txdata->tx_desc_mapping,
3064 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3065 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003066 }
3067 /* end of fastpath */
3068}
3069
3070void bnx2x_free_fp_mem(struct bnx2x *bp)
3071{
3072 int i;
3073 for_each_queue(bp, i)
3074 bnx2x_free_fp_mem_at(bp, i);
3075}
3076
3077static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3078{
3079 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003080 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003081 bnx2x_fp(bp, index, sb_index_values) =
3082 (__le16 *)status_blk.e2_sb->sb.index_values;
3083 bnx2x_fp(bp, index, sb_running_index) =
3084 (__le16 *)status_blk.e2_sb->sb.running_index;
3085 } else {
3086 bnx2x_fp(bp, index, sb_index_values) =
3087 (__le16 *)status_blk.e1x_sb->sb.index_values;
3088 bnx2x_fp(bp, index, sb_running_index) =
3089 (__le16 *)status_blk.e1x_sb->sb.running_index;
3090 }
3091}
3092
3093static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3094{
3095 union host_hc_status_block *sb;
3096 struct bnx2x_fastpath *fp = &bp->fp[index];
3097 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003098 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003099 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003100
3101 /* if rx_ring_size specified - use it */
David S. Miller8decf862011-09-22 03:23:13 -04003102 if (!bp->rx_ring_size) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003103
David S. Miller8decf862011-09-22 03:23:13 -04003104 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3105
3106 /* allocate at least the number of buffers required by the FW */
3107 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3108 MIN_RX_SIZE_TPA, rx_ring_size);
3109
3110 bp->rx_ring_size = rx_ring_size;
3111 } else
3112 rx_ring_size = bp->rx_ring_size;
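	/* e.g. (hypothetical queue count): with 8 RX queues each ring gets
	 * MAX_RX_AVAIL/8 buffers, but never fewer than the FW minimum for
	 * its TPA mode (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA).
	 */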
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003113
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003114 /* Common */
3115 sb = &bnx2x_fp(bp, index, status_blk);
3116#ifdef BCM_CNIC
3117 if (!IS_FCOE_IDX(index)) {
3118#endif
3119 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003120 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003121 BNX2X_PCI_ALLOC(sb->e2_sb,
3122 &bnx2x_fp(bp, index, status_blk_mapping),
3123 sizeof(struct host_hc_status_block_e2));
3124 else
3125 BNX2X_PCI_ALLOC(sb->e1x_sb,
3126 &bnx2x_fp(bp, index, status_blk_mapping),
3127 sizeof(struct host_hc_status_block_e1x));
3128#ifdef BCM_CNIC
3129 }
3130#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003131
3132 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3133 * set shortcuts for it.
3134 */
3135 if (!IS_FCOE_IDX(index))
3136 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003137
3138 /* Tx */
3139 if (!skip_tx_queue(bp, index)) {
3140 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003141 for_each_cos_in_tx_queue(fp, cos) {
3142 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3143
3144 DP(BNX2X_MSG_SP, "allocating tx memory of "
Joe Perches94f05b02011-08-14 12:16:20 +00003145 "fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003146 index, cos);
3147
3148 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003149 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003150 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3151 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003152 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003153 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003154 }
3155
3156 /* Rx */
3157 if (!skip_rx_queue(bp, index)) {
3158 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3159 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3160 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3161 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3162 &bnx2x_fp(bp, index, rx_desc_mapping),
3163 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3164
3165 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3166 &bnx2x_fp(bp, index, rx_comp_mapping),
3167 sizeof(struct eth_fast_path_rx_cqe) *
3168 NUM_RCQ_BD);
3169
3170 /* SGE ring */
3171 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3172 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3173 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3174 &bnx2x_fp(bp, index, rx_sge_mapping),
3175 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3176 /* RX BD ring */
3177 bnx2x_set_next_page_rx_bd(fp);
3178
3179 /* CQ ring */
3180 bnx2x_set_next_page_rx_cq(fp);
3181
3182 /* BDs */
3183 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3184 if (ring_size < rx_ring_size)
3185 goto alloc_mem_err;
3186 }
3187
3188 return 0;
3189
3190/* handles low memory cases */
3191alloc_mem_err:
3192 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3193 index, ring_size);
3194 /* FW will drop all packets if the queue is not big enough;
3195 * in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003196 * The minimum size differs for OOO, TPA and non-TPA queues.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003197 */
3198 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003199 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003200 /* release memory allocated for this queue */
3201 bnx2x_free_fp_mem_at(bp, index);
3202 return -ENOMEM;
3203 }
3204 return 0;
3205}
3206
3207int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3208{
3209 int i;
3210
3211 /*
3212 * 1. Allocate FP for leading - fatal if error
3213 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003214 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3215 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003216 */
3217
3218 /* leading */
3219 if (bnx2x_alloc_fp_mem_at(bp, 0))
3220 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003221
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003222#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003223 if (!NO_FCOE(bp))
3224 /* FCoE */
3225 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3226 /* we will fail load process instead of mark
3227 * NO_FCOE_FLAG
3228 */
3229 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003230#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003231
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003232 /* RSS */
3233 for_each_nondefault_eth_queue(bp, i)
3234 if (bnx2x_alloc_fp_mem_at(bp, i))
3235 break;
3236
3237 /* handle memory failures */
3238 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3239 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3240
3241 WARN_ON(delta < 0);
3242#ifdef BCM_CNIC
3243 /*
3244 * move non-eth FPs next to the last eth FP;
3245 * must be done in that order:
3246 * FCOE_IDX < FWD_IDX < OOO_IDX
3247 */
3248
Ariel Elior6383c0b2011-07-14 08:31:57 +00003249 /* move the FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003250 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3251#endif
3252 bp->num_queues -= delta;
3253 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3254 bp->num_queues + delta, bp->num_queues);
3255 }
3256
3257 return 0;
3258}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003259
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003260void bnx2x_free_mem_bp(struct bnx2x *bp)
3261{
3262 kfree(bp->fp);
3263 kfree(bp->msix_table);
3264 kfree(bp->ilt);
3265}
3266
3267int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3268{
3269 struct bnx2x_fastpath *fp;
3270 struct msix_entry *tbl;
3271 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003272 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003273
Ariel Elior6383c0b2011-07-14 08:31:57 +00003274 /*
3275 * The biggest MSI-X table we might need is the maximum number of
3276 * fast-path IGU SBs plus the default SB (for the PF).
3277 */
3278 msix_table_size = bp->igu_sb_cnt + 1;
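	/* e.g. a PF with 16 fastpath IGU status blocks reserves 17 MSI-X
	 * entries: one per fastpath SB plus the default/slowpath SB.
	 */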
3279
3280 /* fp array: RSS plus CNIC related L2 queues */
3281 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3282 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003283 if (!fp)
3284 goto alloc_err;
3285 bp->fp = fp;
3286
3287 /* msix table */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003288 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003289 if (!tbl)
3290 goto alloc_err;
3291 bp->msix_table = tbl;
3292
3293 /* ilt */
3294 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3295 if (!ilt)
3296 goto alloc_err;
3297 bp->ilt = ilt;
3298
3299 return 0;
3300alloc_err:
3301 bnx2x_free_mem_bp(bp);
3302 return -ENOMEM;
3303
3304}
3305
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003306int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003307{
3308 struct bnx2x *bp = netdev_priv(dev);
3309
3310 if (unlikely(!netif_running(dev)))
3311 return 0;
3312
3313 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3314 return bnx2x_nic_load(bp, LOAD_NORMAL);
3315}
3316
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003317int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3318{
3319 u32 sel_phy_idx = 0;
3320 if (bp->link_params.num_phys <= 1)
3321 return INT_PHY;
3322
3323 if (bp->link_vars.link_up) {
3324 sel_phy_idx = EXT_PHY1;
3325 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3326 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3327 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3328 sel_phy_idx = EXT_PHY2;
3329 } else {
3330
3331 switch (bnx2x_phy_selection(&bp->link_params)) {
3332 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3333 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3334 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3335 sel_phy_idx = EXT_PHY1;
3336 break;
3337 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3338 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3339 sel_phy_idx = EXT_PHY2;
3340 break;
3341 }
3342 }
3343
3344 return sel_phy_idx;
3345}
3346
3347int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3348{
3349 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3350 /*
3351 * The selected active PHY is always determined after swapping (in
3352 * case PHY swapping is enabled), so when swapping is enabled we
3353 * need to reverse the configuration.
3354 */
3355
3356 if (bp->link_params.multi_phy_config &
3357 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3358 if (sel_phy_idx == EXT_PHY1)
3359 sel_phy_idx = EXT_PHY2;
3360 else if (sel_phy_idx == EXT_PHY2)
3361 sel_phy_idx = EXT_PHY1;
3362 }
3363 return LINK_CONFIG_IDX(sel_phy_idx);
3364}
3365
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003366#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3367int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3368{
3369 struct bnx2x *bp = netdev_priv(dev);
3370 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3371
3372 switch (type) {
3373 case NETDEV_FCOE_WWNN:
3374 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3375 cp->fcoe_wwn_node_name_lo);
3376 break;
3377 case NETDEV_FCOE_WWPN:
3378 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3379 cp->fcoe_wwn_port_name_lo);
3380 break;
3381 default:
3382 return -EINVAL;
3383 }
3384
3385 return 0;
3386}
3387#endif
3388
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003389/* called with rtnl_lock */
3390int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3391{
3392 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003393
3394 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Joe Perchesf1deab52011-08-14 12:16:21 +00003395 pr_err("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003396 return -EAGAIN;
3397 }
3398
3399 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3400 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3401 return -EINVAL;
3402
3403 /* This does not race with packet allocation
3404 * because the actual alloc size is
3405 * only updated as part of load
3406 */
3407 dev->mtu = new_mtu;
3408
Michał Mirosław66371c42011-04-12 09:38:23 +00003409 return bnx2x_reload_if_running(dev);
3410}
3411
3412u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3413{
3414 struct bnx2x *bp = netdev_priv(dev);
3415
3416 /* TPA requires Rx CSUM offloading */
3417 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3418 features &= ~NETIF_F_LRO;
3419
3420 return features;
3421}
3422
3423int bnx2x_set_features(struct net_device *dev, u32 features)
3424{
3425 struct bnx2x *bp = netdev_priv(dev);
3426 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003427 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003428
3429 if (features & NETIF_F_LRO)
3430 flags |= TPA_ENABLE_FLAG;
3431 else
3432 flags &= ~TPA_ENABLE_FLAG;
3433
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003434 if (features & NETIF_F_LOOPBACK) {
3435 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3436 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3437 bnx2x_reload = true;
3438 }
3439 } else {
3440 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3441 bp->link_params.loopback_mode = LOOPBACK_NONE;
3442 bnx2x_reload = true;
3443 }
3444 }
3445
Michał Mirosław66371c42011-04-12 09:38:23 +00003446 if (flags ^ bp->flags) {
3447 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003448 bnx2x_reload = true;
3449 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003450
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003451 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003452 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3453 return bnx2x_reload_if_running(dev);
3454 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003455 }
3456
Michał Mirosław66371c42011-04-12 09:38:23 +00003457 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003458}
3459
3460void bnx2x_tx_timeout(struct net_device *dev)
3461{
3462 struct bnx2x *bp = netdev_priv(dev);
3463
3464#ifdef BNX2X_STOP_ON_ERROR
3465 if (!bp->panic)
3466 bnx2x_panic();
3467#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003468
3469 smp_mb__before_clear_bit();
3470 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3471 smp_mb__after_clear_bit();
3472
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003473 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003474 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003475}
3476
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003477int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3478{
3479 struct net_device *dev = pci_get_drvdata(pdev);
3480 struct bnx2x *bp;
3481
3482 if (!dev) {
3483 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3484 return -ENODEV;
3485 }
3486 bp = netdev_priv(dev);
3487
3488 rtnl_lock();
3489
3490 pci_save_state(pdev);
3491
3492 if (!netif_running(dev)) {
3493 rtnl_unlock();
3494 return 0;
3495 }
3496
3497 netif_device_detach(dev);
3498
3499 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3500
3501 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3502
3503 rtnl_unlock();
3504
3505 return 0;
3506}
3507
3508int bnx2x_resume(struct pci_dev *pdev)
3509{
3510 struct net_device *dev = pci_get_drvdata(pdev);
3511 struct bnx2x *bp;
3512 int rc;
3513
3514 if (!dev) {
3515 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3516 return -ENODEV;
3517 }
3518 bp = netdev_priv(dev);
3519
3520 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Joe Perchesf1deab52011-08-14 12:16:21 +00003521 pr_err("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003522 return -EAGAIN;
3523 }
3524
3525 rtnl_lock();
3526
3527 pci_restore_state(pdev);
3528
3529 if (!netif_running(dev)) {
3530 rtnl_unlock();
3531 return 0;
3532 }
3533
3534 bnx2x_set_power_state(bp, PCI_D0);
3535 netif_device_attach(dev);
3536
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003537 /* Since the chip was reset, clear the FW sequence number */
3538 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003539 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3540
3541 rtnl_unlock();
3542
3543 return rc;
3544}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003545
3546
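/* Write the CDU "validation" bytes into a connection's ETH context. They
 * encode the HW CID, CDU region number and connection type which, as far as
 * the CDU_RSRVD_VALUE_TYPE_A() helper suggests, lets the chip sanity-check
 * that a fetched context belongs to the expected connection.
 */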
3547void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3548 u32 cid)
3549{
3550 /* ustorm cxt validation */
3551 cxt->ustorm_ag_context.cdu_usage =
3552 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3553 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3554 /* xcontext validation */
3555 cxt->xstorm_ag_context.cdu_reserved =
3556 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3557 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3558}
3559
3560static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3561 u8 fw_sb_id, u8 sb_index,
3562 u8 ticks)
3563{
3564
3565 u32 addr = BAR_CSTRORM_INTMEM +
3566 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3567 REG_WR8(bp, addr, ticks);
3568 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3569 port, fw_sb_id, sb_index, ticks);
3570}
3571
3572static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3573 u16 fw_sb_id, u8 sb_index,
3574 u8 disable)
3575{
3576 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3577 u32 addr = BAR_CSTRORM_INTMEM +
3578 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3579 u16 flags = REG_RD16(bp, addr);
3580 /* clear and set */
3581 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3582 flags |= enable_flag;
3583 REG_WR16(bp, addr, flags);
3584 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3585 port, fw_sb_id, sb_index, disable);
3586}
3587
3588void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3589 u8 sb_index, u8 disable, u16 usec)
3590{
3591 int port = BP_PORT(bp);
3592 u8 ticks = usec / BNX2X_BTR;
3593
3594 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3595
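	/* an explicit disable request, or a timeout of 0 usec, turns off host
	 * coalescing for this SB index; the ticks written above are in units
	 * of BNX2X_BTR microseconds.
	 */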
3596 disable = disable ? 1 : (usec ? 0 : 1);
3597 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3598}