Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Dmitry Kravkov5de92402011-05-04 23:51:13 +00003 * Copyright (c) 2007-2011 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000018#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000019#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000020#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000021#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000022#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070023#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000024#include <linux/firmware.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000025#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000026#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000027#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000028#include "bnx2x_sp.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000029
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030030
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000031
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000032/**
33 * bnx2x_bz_fp - zero content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @index: fastpath index to be zeroed
37 *
 38 * Makes sure the contents of bp->fp[index].napi are kept
 39 * intact.
40 */
41static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42{
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
47
48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi;
Ariel Elior6383c0b2011-07-14 08:31:57 +000050
51 fp->bp = bp;
52 fp->index = index;
53 if (IS_ETH_FP(fp))
54 fp->max_cos = bp->max_cos;
55 else
56 /* Special queues support only one CoS */
57 fp->max_cos = 1;
58
59 /*
 60 * Set the TPA flag for each queue. The TPA flag determines the queue's
 61 * minimal size, so it must be set prior to queue memory allocation
62 */
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64
65#ifdef BCM_CNIC
66 /* We don't want TPA on FCoE, FWD and OOO L2 rings */
67 bnx2x_fcoe(bp, disable_tpa) = 1;
68#endif
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000069}
70
71/**
72 * bnx2x_move_fp - move content of the fastpath structure.
73 *
74 * @bp: driver handle
75 * @from: source FP index
76 * @to: destination FP index
77 *
 78 * Makes sure the contents of bp->fp[to].napi are kept
 79 * intact.
80 */
81static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
82{
83 struct bnx2x_fastpath *from_fp = &bp->fp[from];
84 struct bnx2x_fastpath *to_fp = &bp->fp[to];
85 struct napi_struct orig_napi = to_fp->napi;
86 /* Move bnx2x_fastpath contents */
87 memcpy(to_fp, from_fp, sizeof(*to_fp));
88 to_fp->index = to;
89
90 /* Restore the NAPI object as it has been already initialized */
91 to_fp->napi = orig_napi;
92}
93
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030094int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
95
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000096/* free skb in the packet ring at pos idx
97 * return idx of last bd freed
98 */
Ariel Elior6383c0b2011-07-14 08:31:57 +000099static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000100 u16 idx)
101{
Ariel Elior6383c0b2011-07-14 08:31:57 +0000102 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000103 struct eth_tx_start_bd *tx_start_bd;
104 struct eth_tx_bd *tx_data_bd;
105 struct sk_buff *skb = tx_buf->skb;
106 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
107 int nbd;
108
109 /* prefetch skb end pointer to speedup dev_kfree_skb() */
110 prefetch(&skb->end);
111
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300112 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000113 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000114
115 /* unmap first bd */
116 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000117 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000118 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +0000119 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000120
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300121
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000122 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
123#ifdef BNX2X_STOP_ON_ERROR
124 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
125 BNX2X_ERR("BAD nbd!\n");
126 bnx2x_panic();
127 }
128#endif
129 new_cons = nbd + tx_buf->first_bd;
130
131 /* Get the next bd */
132 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
133
134 /* Skip a parse bd... */
135 --nbd;
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
137
138 /* ...and the TSO split header bd since they have no mapping */
139 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
140 --nbd;
141 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
142 }
143
144 /* now free frags */
145 while (nbd > 0) {
146
147 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000148 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000149 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
150 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
151 if (--nbd)
152 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153 }
154
155 /* release skb */
156 WARN_ON(!skb);
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000157 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000158 tx_buf->first_bd = 0;
159 tx_buf->skb = NULL;
160
161 return new_cons;
162}
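
/*
 * Sketch of the BD chain walked by bnx2x_free_tx_pkt() above for a typical
 * TSO packet:
 *
 *   start BD -> parse BD -> [TSO split header BD] -> frag BD ... frag BD
 *
 * Only the start BD and the frag BDs carry DMA mappings, which is why the
 * parse BD and the optional split header BD are skipped without an unmap.
 */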
163
Ariel Elior6383c0b2011-07-14 08:31:57 +0000164int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000165{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000166 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000167 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000168
169#ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
171 return -1;
172#endif
173
Ariel Elior6383c0b2011-07-14 08:31:57 +0000174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000177
178 while (sw_cons != hw_cons) {
179 u16 pkt_cons;
180
181 pkt_cons = TX_BD(sw_cons);
182
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000183 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
184 " pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000186
Ariel Elior6383c0b2011-07-14 08:31:57 +0000187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000188 sw_cons++;
189 }
190
Ariel Elior6383c0b2011-07-14 08:31:57 +0000191 txdata->tx_pkt_cons = sw_cons;
192 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000193
194 /* Need to make the tx_bd_cons update visible to start_xmit()
195 * before checking for netif_tx_queue_stopped(). Without the
196 * memory barrier, there is a small possibility that
197 * start_xmit() will miss it and cause the queue to be stopped
198 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300199 * On the other hand we need an rmb() here to ensure the proper
200 * ordering of bit testing in the following
201 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000202 */
203 smp_mb();
204
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000205 if (unlikely(netif_tx_queue_stopped(txq))) {
206 /* Taking tx_lock() is needed to prevent reenabling the queue
 207 * while it's empty. This could have happened if rx_action() gets
208 * suspended in bnx2x_tx_int() after the condition before
209 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
210 *
211 * stops the queue->sees fresh tx_bd_cons->releases the queue->
212 * sends some packets consuming the whole queue again->
213 * stops the queue
214 */
215
216 __netif_tx_lock(txq, smp_processor_id());
217
218 if ((netif_tx_queue_stopped(txq)) &&
219 (bp->state == BNX2X_STATE_OPEN) &&
Ariel Elior6383c0b2011-07-14 08:31:57 +0000220 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000221 netif_tx_wake_queue(txq);
222
223 __netif_tx_unlock(txq);
224 }
225 return 0;
226}
227
228static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
229 u16 idx)
230{
231 u16 last_max = fp->last_max_sge;
232
233 if (SUB_S16(idx, last_max) > 0)
234 fp->last_max_sge = idx;
235}
236
237static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
238 struct eth_fast_path_rx_cqe *fp_cqe)
239{
240 struct bnx2x *bp = fp->bp;
241 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
242 le16_to_cpu(fp_cqe->len_on_bd)) >>
243 SGE_PAGE_SHIFT;
244 u16 last_max, last_elem, first_elem;
245 u16 delta = 0;
246 u16 i;
247
248 if (!sge_len)
249 return;
250
251 /* First mark all used pages */
252 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300253 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000254 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000255
256 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000257 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000258
259 /* Here we assume that the last SGE index is the biggest */
260 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000261 bnx2x_update_last_max_sge(fp,
262 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000263
264 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300265 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
266 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000267
268 /* If ring is not full */
269 if (last_elem + 1 != first_elem)
270 last_elem++;
271
272 /* Now update the prod */
273 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
274 if (likely(fp->sge_mask[i]))
275 break;
276
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300277 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
278 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000279 }
280
281 if (delta > 0) {
282 fp->rx_sge_prod += delta;
283 /* clear page-end entries */
284 bnx2x_clear_sge_mask_next_elems(fp);
285 }
286
287 DP(NETIF_MSG_RX_STATUS,
288 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
289 fp->last_max_sge, fp->rx_sge_prod);
290}
291
292static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300293 struct sk_buff *skb, u16 cons, u16 prod,
294 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000295{
296 struct bnx2x *bp = fp->bp;
297 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
298 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
299 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
300 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300301 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
302 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000303
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300304 /* print error if current state != stop */
305 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000306 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
307
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300308 /* Try to map an empty skb from the aggregation info */
309 mapping = dma_map_single(&bp->pdev->dev,
310 first_buf->skb->data,
311 fp->rx_buf_size, DMA_FROM_DEVICE);
312 /*
313 * ...if it fails - move the skb from the consumer to the producer
314 * and set the current aggregation state as ERROR to drop it
315 * when TPA_STOP arrives.
316 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000317
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300318 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
319 /* Move the BD from the consumer to the producer */
320 bnx2x_reuse_rx_skb(fp, cons, prod);
321 tpa_info->tpa_state = BNX2X_TPA_ERROR;
322 return;
323 }
324
325 /* move empty skb from pool to prod */
326 prod_rx_buf->skb = first_buf->skb;
327 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000328 /* point prod_bd to new skb */
329 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
330 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
331
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300332 /* move partial skb from cons to pool (don't unmap yet) */
333 *first_buf = *cons_rx_buf;
334
335 /* mark bin state as START */
336 tpa_info->parsing_flags =
337 le16_to_cpu(cqe->pars_flags.flags);
338 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
339 tpa_info->tpa_state = BNX2X_TPA_START;
340 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
341 tpa_info->placement_offset = cqe->placement_offset;
342
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000343#ifdef BNX2X_STOP_ON_ERROR
344 fp->tpa_queue_used |= (1 << queue);
345#ifdef _ASM_GENERIC_INT_L64_H
346 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
347#else
348 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
349#endif
350 fp->tpa_queue_used);
351#endif
352}
353
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000354/* Timestamp option length allowed for TPA aggregation:
355 *
356 * nop nop kind length echo val
357 */
358#define TPA_TSTAMP_OPT_LEN 12
359/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000360 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000361 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000362 * @bp: driver handle
363 * @parsing_flags: parsing flags from the START CQE
364 * @len_on_bd: total length of the first packet for the
365 * aggregation.
366 *
367 * Approximate value of the MSS for this aggregation calculated using
368 * the first packet of it.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000369 */
370static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
371 u16 len_on_bd)
372{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300373 /*
 374 * A TPA aggregation won't have IP options, IPv6 extension headers, or
 375 * TCP options other than the timestamp.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000376 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300377 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
378
379 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
380 PRS_FLAG_OVERETH_IPV6)
381 hdrs_len += sizeof(struct ipv6hdr);
382 else /* IPv4 */
383 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000384
385
 386 /* Check if there was a TCP timestamp; if there is, it will
 387 * always be 12 bytes long: nop nop kind length echo val.
388 *
389 * Otherwise FW would close the aggregation.
390 */
391 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
392 hdrs_len += TPA_TSTAMP_OPT_LEN;
393
394 return len_on_bd - hdrs_len;
395}
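
/*
 * Worked example (illustrative, assuming a full-sized first frame with no
 * VLAN tag): for an IPv4 flow with TCP timestamps on a 1500-byte MTU,
 * len_on_bd is 1514 and hdrs_len = ETH_HLEN (14) + tcphdr (20) + iphdr (20)
 * + TPA_TSTAMP_OPT_LEN (12) = 66, so the approximate MSS is 1514 - 66 = 1448.
 */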
396
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000397static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300398 u16 queue, struct sk_buff *skb,
399 struct eth_end_agg_rx_cqe *cqe,
400 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000401{
402 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000403 u32 i, frag_len, frag_size, pages;
404 int err;
405 int j;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300406 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
407 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000408
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300409 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000410 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
411
412 /* This is needed in order to enable forwarding support */
413 if (frag_size)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300414 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
415 tpa_info->parsing_flags, len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000416
417#ifdef BNX2X_STOP_ON_ERROR
418 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
419 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
420 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300421 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000422 bnx2x_panic();
423 return -EINVAL;
424 }
425#endif
426
427 /* Run through the SGL and compose the fragmented skb */
428 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300429 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000430
431 /* FW gives the indices of the SGE as if the ring is an array
432 (meaning that "next" element will consume 2 indices) */
433 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
434 rx_pg = &fp->rx_page_ring[sge_idx];
435 old_rx_pg = *rx_pg;
436
437 /* If we fail to allocate a substitute page, we simply stop
438 where we are and drop the whole packet */
439 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
440 if (unlikely(err)) {
441 fp->eth_q_stats.rx_skb_alloc_failed++;
442 return err;
443 }
444
 445 /* Unmap the page as we are going to pass it to the stack */
446 dma_unmap_page(&bp->pdev->dev,
447 dma_unmap_addr(&old_rx_pg, mapping),
448 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
449
450 /* Add one frag and update the appropriate fields in the skb */
451 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
452
453 skb->data_len += frag_len;
454 skb->truesize += frag_len;
455 skb->len += frag_len;
456
457 frag_size -= frag_len;
458 }
459
460 return 0;
461}
462
463static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300464 u16 queue, struct eth_end_agg_rx_cqe *cqe,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000465 u16 cqe_idx)
466{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300467 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
468 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
469 u8 pad = tpa_info->placement_offset;
470 u16 len = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000471 struct sk_buff *skb = rx_buf->skb;
472 /* alloc new skb */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300473 struct sk_buff *new_skb;
474 u8 old_tpa_state = tpa_info->tpa_state;
475
476 tpa_info->tpa_state = BNX2X_TPA_STOP;
477
 478 /* If there was an error during the handling of the TPA_START -
479 * drop this aggregation.
480 */
481 if (old_tpa_state == BNX2X_TPA_ERROR)
482 goto drop;
483
484 /* Try to allocate the new skb */
485 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000486
487 /* Unmap skb in the pool anyway, as we are going to change
488 pool entry status to BNX2X_TPA_STOP even if new skb allocation
489 fails. */
490 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800491 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000492
493 if (likely(new_skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000494 prefetch(skb);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000495 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000496
497#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800498 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000499 BNX2X_ERR("skb_put is about to fail... "
500 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800501 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000502 bnx2x_panic();
503 return;
504 }
505#endif
506
507 skb_reserve(skb, pad);
508 skb_put(skb, len);
509
510 skb->protocol = eth_type_trans(skb, bp->dev);
511 skb->ip_summed = CHECKSUM_UNNECESSARY;
512
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300513 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
514 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
515 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
Hao Zheng9bcc0892010-10-20 13:56:11 +0000516 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000517 } else {
518 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
519 " - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000520 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000521 }
522
523
524 /* put new skb in bin */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300525 rx_buf->skb = new_skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000526
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300527 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000528 }
529
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300530drop:
531 /* drop the packet and keep the buffer in the bin */
532 DP(NETIF_MSG_RX_STATUS,
533 "Failed to allocate or map a new skb - dropping packet!\n");
534 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000535}
536
537/* Set Toeplitz hash value in the skb using the value from the
538 * CQE (calculated by HW).
539 */
540static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
541 struct sk_buff *skb)
542{
543 /* Set Toeplitz hash from CQE */
544 if ((bp->dev->features & NETIF_F_RXHASH) &&
545 (cqe->fast_path_cqe.status_flags &
546 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
547 skb->rxhash =
548 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
549}
550
551int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
552{
553 struct bnx2x *bp = fp->bp;
554 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
555 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
556 int rx_pkt = 0;
557
558#ifdef BNX2X_STOP_ON_ERROR
559 if (unlikely(bp->panic))
560 return 0;
561#endif
562
563 /* CQ "next element" is of the size of the regular element,
564 that's why it's ok here */
565 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
566 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
567 hw_comp_cons++;
568
569 bd_cons = fp->rx_bd_cons;
570 bd_prod = fp->rx_bd_prod;
571 bd_prod_fw = bd_prod;
572 sw_comp_cons = fp->rx_comp_cons;
573 sw_comp_prod = fp->rx_comp_prod;
574
575 /* Memory barrier necessary as speculative reads of the rx
576 * buffer can be ahead of the index in the status block
577 */
578 rmb();
579
580 DP(NETIF_MSG_RX_STATUS,
581 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
582 fp->index, hw_comp_cons, sw_comp_cons);
583
584 while (sw_comp_cons != hw_comp_cons) {
585 struct sw_rx_bd *rx_buf = NULL;
586 struct sk_buff *skb;
587 union eth_rx_cqe *cqe;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300588 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000589 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300590 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000591 u16 len, pad;
592
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300593#ifdef BNX2X_STOP_ON_ERROR
594 if (unlikely(bp->panic))
595 return 0;
596#endif
597
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000598 comp_ring_cons = RCQ_BD(sw_comp_cons);
599 bd_prod = RX_BD(bd_prod);
600 bd_cons = RX_BD(bd_cons);
601
602 /* Prefetch the page containing the BD descriptor
603 at producer's index. It will be needed when new skb is
604 allocated */
605 prefetch((void *)(PAGE_ALIGN((unsigned long)
606 (&fp->rx_desc_ring[bd_prod])) -
607 PAGE_SIZE + 1));
608
609 cqe = &fp->rx_comp_ring[comp_ring_cons];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300610 cqe_fp = &cqe->fast_path_cqe;
611 cqe_fp_flags = cqe_fp->type_error_flags;
612 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000613
614 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
615 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300616 cqe_fp_flags, cqe_fp->status_flags,
617 le32_to_cpu(cqe_fp->rss_hash_result),
618 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000619
620 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300621 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000622 bnx2x_sp_event(fp, cqe);
623 goto next_cqe;
624
625 /* this is an rx packet */
626 } else {
627 rx_buf = &fp->rx_buf_ring[bd_cons];
628 skb = rx_buf->skb;
629 prefetch(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000630
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300631 if (!CQE_TYPE_FAST(cqe_fp_type)) {
632#ifdef BNX2X_STOP_ON_ERROR
633 /* sanity check */
634 if (fp->disable_tpa &&
635 (CQE_TYPE_START(cqe_fp_type) ||
636 CQE_TYPE_STOP(cqe_fp_type)))
637 BNX2X_ERR("START/STOP packet while "
638 "disable_tpa type %x\n",
639 CQE_TYPE(cqe_fp_type));
640#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000641
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300642 if (CQE_TYPE_START(cqe_fp_type)) {
643 u16 queue = cqe_fp->queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000644 DP(NETIF_MSG_RX_STATUS,
645 "calling tpa_start on queue %d\n",
646 queue);
647
648 bnx2x_tpa_start(fp, queue, skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300649 bd_cons, bd_prod,
650 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000651
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300652 /* Set Toeplitz hash for LRO skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000653 bnx2x_set_skb_rxhash(bp, cqe, skb);
654
655 goto next_rx;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300656
657 } else {
658 u16 queue =
659 cqe->end_agg_cqe.queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000660 DP(NETIF_MSG_RX_STATUS,
661 "calling tpa_stop on queue %d\n",
662 queue);
663
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300664 bnx2x_tpa_stop(bp, fp, queue,
665 &cqe->end_agg_cqe,
666 comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000667#ifdef BNX2X_STOP_ON_ERROR
668 if (bp->panic)
669 return 0;
670#endif
671
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300672 bnx2x_update_sge_prod(fp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000673 goto next_cqe;
674 }
675 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300676 /* non TPA */
677 len = le16_to_cpu(cqe_fp->pkt_len);
678 pad = cqe_fp->placement_offset;
Vladislav Zolotarov9924caf2011-07-19 01:37:42 +0000679 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000680 dma_unmap_addr(rx_buf, mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300681 pad + RX_COPY_THRESH,
682 DMA_FROM_DEVICE);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000683 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000684
685 /* is this an error packet? */
686 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
687 DP(NETIF_MSG_RX_ERR,
688 "ERROR flags %x rx packet %u\n",
689 cqe_fp_flags, sw_comp_cons);
690 fp->eth_q_stats.rx_err_discard_pkt++;
691 goto reuse_rx;
692 }
693
694 /* Since we don't have a jumbo ring
695 * copy small packets if mtu > 1500
696 */
697 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
698 (len <= RX_COPY_THRESH)) {
699 struct sk_buff *new_skb;
700
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300701 new_skb = netdev_alloc_skb(bp->dev, len + pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000702 if (new_skb == NULL) {
703 DP(NETIF_MSG_RX_ERR,
704 "ERROR packet dropped "
705 "because of alloc failure\n");
706 fp->eth_q_stats.rx_skb_alloc_failed++;
707 goto reuse_rx;
708 }
709
710 /* aligned copy */
711 skb_copy_from_linear_data_offset(skb, pad,
712 new_skb->data + pad, len);
713 skb_reserve(new_skb, pad);
714 skb_put(new_skb, len);
715
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000716 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000717
718 skb = new_skb;
719
720 } else
721 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
722 dma_unmap_single(&bp->pdev->dev,
723 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800724 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000725 DMA_FROM_DEVICE);
726 skb_reserve(skb, pad);
727 skb_put(skb, len);
728
729 } else {
730 DP(NETIF_MSG_RX_ERR,
731 "ERROR packet dropped because "
732 "of alloc failure\n");
733 fp->eth_q_stats.rx_skb_alloc_failed++;
734reuse_rx:
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000735 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000736 goto next_rx;
737 }
738
739 skb->protocol = eth_type_trans(skb, bp->dev);
740
 741 /* Set Toeplitz hash for a non-LRO skb */
742 bnx2x_set_skb_rxhash(bp, cqe, skb);
743
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700744 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000745
Michał Mirosław66371c42011-04-12 09:38:23 +0000746 if (bp->dev->features & NETIF_F_RXCSUM) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300747
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000748 if (likely(BNX2X_RX_CSUM_OK(cqe)))
749 skb->ip_summed = CHECKSUM_UNNECESSARY;
750 else
751 fp->eth_q_stats.hw_csum_err++;
752 }
753 }
754
755 skb_record_rx_queue(skb, fp->index);
756
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300757 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
758 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000759 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300760 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000761 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000762
763
764next_rx:
765 rx_buf->skb = NULL;
766
767 bd_cons = NEXT_RX_IDX(bd_cons);
768 bd_prod = NEXT_RX_IDX(bd_prod);
769 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
770 rx_pkt++;
771next_cqe:
772 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
773 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
774
775 if (rx_pkt == budget)
776 break;
777 } /* while */
778
779 fp->rx_bd_cons = bd_cons;
780 fp->rx_bd_prod = bd_prod_fw;
781 fp->rx_comp_cons = sw_comp_cons;
782 fp->rx_comp_prod = sw_comp_prod;
783
784 /* Update producers */
785 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
786 fp->rx_sge_prod);
787
788 fp->rx_pkt += rx_pkt;
789 fp->rx_calls++;
790
791 return rx_pkt;
792}
793
794static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
795{
796 struct bnx2x_fastpath *fp = fp_cookie;
797 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000798 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000799
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000800 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
801 "[fp %d fw_sd %d igusb %d]\n",
802 fp->index, fp->fw_sb_id, fp->igu_sb_id);
803 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000804
805#ifdef BNX2X_STOP_ON_ERROR
806 if (unlikely(bp->panic))
807 return IRQ_HANDLED;
808#endif
809
810 /* Handle Rx and Tx according to MSI-X vector */
811 prefetch(fp->rx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000812
813 for_each_cos_in_tx_queue(fp, cos)
814 prefetch(fp->txdata[cos].tx_cons_sb);
815
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000816 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000817 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
818
819 return IRQ_HANDLED;
820}
821
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000822/* HW Lock for shared dual port PHYs */
823void bnx2x_acquire_phy_lock(struct bnx2x *bp)
824{
825 mutex_lock(&bp->port.phy_mutex);
826
827 if (bp->port.need_hw_lock)
828 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
829}
830
831void bnx2x_release_phy_lock(struct bnx2x *bp)
832{
833 if (bp->port.need_hw_lock)
834 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
835
836 mutex_unlock(&bp->port.phy_mutex);
837}
838
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800839/* calculates MF speed according to current linespeed and MF configuration */
840u16 bnx2x_get_mf_speed(struct bnx2x *bp)
841{
842 u16 line_speed = bp->link_vars.line_speed;
843 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000844 u16 maxCfg = bnx2x_extract_max_cfg(bp,
845 bp->mf_config[BP_VN(bp)]);
846
847 /* Calculate the current MAX line speed limit for the MF
848 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800849 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000850 if (IS_MF_SI(bp))
851 line_speed = (line_speed * maxCfg) / 100;
852 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800853 u16 vn_max_rate = maxCfg * 100;
854
855 if (vn_max_rate < line_speed)
856 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000857 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800858 }
859
860 return line_speed;
861}
862
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000863/**
864 * bnx2x_fill_report_data - fill link report data to report
865 *
866 * @bp: driver handle
867 * @data: link state to update
868 *
 869 * It uses non-atomic bit operations because it is called under the mutex.
870 */
871static inline void bnx2x_fill_report_data(struct bnx2x *bp,
872 struct bnx2x_link_report_data *data)
873{
874 u16 line_speed = bnx2x_get_mf_speed(bp);
875
876 memset(data, 0, sizeof(*data));
877
 878 /* Fill the report data: effective line speed */
879 data->line_speed = line_speed;
880
881 /* Link is down */
882 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
883 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
884 &data->link_report_flags);
885
886 /* Full DUPLEX */
887 if (bp->link_vars.duplex == DUPLEX_FULL)
888 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
889
890 /* Rx Flow Control is ON */
891 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
892 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
893
894 /* Tx Flow Control is ON */
895 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
896 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
897}
898
899/**
900 * bnx2x_link_report - report link status to OS.
901 *
902 * @bp: driver handle
903 *
904 * Calls the __bnx2x_link_report() under the same locking scheme
 905 * as the link/PHY state managing code to ensure consistent link
906 * reporting.
907 */
908
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000909void bnx2x_link_report(struct bnx2x *bp)
910{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000911 bnx2x_acquire_phy_lock(bp);
912 __bnx2x_link_report(bp);
913 bnx2x_release_phy_lock(bp);
914}
915
916/**
917 * __bnx2x_link_report - report link status to OS.
918 *
919 * @bp: driver handle
920 *
 921 * Non-atomic implementation.
922 * Should be called under the phy_lock.
923 */
924void __bnx2x_link_report(struct bnx2x *bp)
925{
926 struct bnx2x_link_report_data cur_data;
927
928 /* reread mf_cfg */
929 if (!CHIP_IS_E1(bp))
930 bnx2x_read_mf_cfg(bp);
931
932 /* Read the current link report info */
933 bnx2x_fill_report_data(bp, &cur_data);
934
935 /* Don't report link down or exactly the same link status twice */
936 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
937 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
938 &bp->last_reported_link.link_report_flags) &&
939 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
940 &cur_data.link_report_flags)))
941 return;
942
943 bp->link_cnt++;
944
 945 /* We are going to report new link parameters now -
946 * remember the current data for the next time.
947 */
948 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
949
950 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
951 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000952 netif_carrier_off(bp->dev);
953 netdev_err(bp->dev, "NIC Link is Down\n");
954 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000955 } else {
956 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000957 netdev_info(bp->dev, "NIC Link is Up, ");
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000958 pr_cont("%d Mbps ", cur_data.line_speed);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000959
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000960 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
961 &cur_data.link_report_flags))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000962 pr_cont("full duplex");
963 else
964 pr_cont("half duplex");
965
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000966 /* Handle the FC at the end so that only these flags could
 967 * possibly be set. This way we can easily check whether any FC
 968 * is enabled.
969 */
970 if (cur_data.link_report_flags) {
971 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
972 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000973 pr_cont(", receive ");
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000974 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
975 &cur_data.link_report_flags))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000976 pr_cont("& transmit ");
977 } else {
978 pr_cont(", transmit ");
979 }
980 pr_cont("flow control ON");
981 }
982 pr_cont("\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000983 }
984}
985
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000986void bnx2x_init_rx_rings(struct bnx2x *bp)
987{
988 int func = BP_FUNC(bp);
989 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300990 ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000991 u16 ring_prod;
992 int i, j;
993
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000994 /* Allocate TPA resources */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000995 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000996 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000997
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800998 DP(NETIF_MSG_IFUP,
999 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001001 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001002 /* Fill the per-aggregation pool */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001003 for (i = 0; i < max_agg_queues; i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001004 struct bnx2x_agg_info *tpa_info =
1005 &fp->tpa_info[i];
1006 struct sw_rx_bd *first_buf =
1007 &tpa_info->first_buf;
1008
1009 first_buf->skb = netdev_alloc_skb(bp->dev,
1010 fp->rx_buf_size);
1011 if (!first_buf->skb) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001012 BNX2X_ERR("Failed to allocate TPA "
1013 "skb pool for queue[%d] - "
1014 "disabling TPA on this "
1015 "queue!\n", j);
1016 bnx2x_free_tpa_pool(bp, fp, i);
1017 fp->disable_tpa = 1;
1018 break;
1019 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001020 dma_unmap_addr_set(first_buf, mapping, 0);
1021 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001022 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001023
1024 /* "next page" elements initialization */
1025 bnx2x_set_next_page_sgl(fp);
1026
1027 /* set SGEs bit mask */
1028 bnx2x_init_sge_ring_bit_mask(fp);
1029
1030 /* Allocate SGEs and initialize the ring elements */
1031 for (i = 0, ring_prod = 0;
1032 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1033
1034 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1035 BNX2X_ERR("was only able to allocate "
1036 "%d rx sges\n", i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001037 BNX2X_ERR("disabling TPA for "
1038 "queue[%d]\n", j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001039 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001040 bnx2x_free_rx_sge_range(bp, fp,
1041 ring_prod);
1042 bnx2x_free_tpa_pool(bp, fp,
1043 max_agg_queues);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001044 fp->disable_tpa = 1;
1045 ring_prod = 0;
1046 break;
1047 }
1048 ring_prod = NEXT_SGE_IDX(ring_prod);
1049 }
1050
1051 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001052 }
1053 }
1054
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001055 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001056 struct bnx2x_fastpath *fp = &bp->fp[j];
1057
1058 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001059
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001060 /* Activate BD ring */
1061 /* Warning!
1062 * this will generate an interrupt (to the TSTORM) and
1063 * must only be done after the chip is initialized
1064 */
1065 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1066 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001067
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001068 if (j != 0)
1069 continue;
1070
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001071 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001072 REG_WR(bp, BAR_USTRORM_INTMEM +
1073 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1074 U64_LO(fp->rx_comp_mapping));
1075 REG_WR(bp, BAR_USTRORM_INTMEM +
1076 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1077 U64_HI(fp->rx_comp_mapping));
1078 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001079 }
1080}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001081
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001082static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1083{
1084 int i;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001085 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001086
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001087 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001088 struct bnx2x_fastpath *fp = &bp->fp[i];
Ariel Elior6383c0b2011-07-14 08:31:57 +00001089 for_each_cos_in_tx_queue(fp, cos) {
1090 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001091
Ariel Elior6383c0b2011-07-14 08:31:57 +00001092 u16 bd_cons = txdata->tx_bd_cons;
1093 u16 sw_prod = txdata->tx_pkt_prod;
1094 u16 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001095
Ariel Elior6383c0b2011-07-14 08:31:57 +00001096 while (sw_cons != sw_prod) {
1097 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1098 TX_BD(sw_cons));
1099 sw_cons++;
1100 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001101 }
1102 }
1103}
1104
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001105static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1106{
1107 struct bnx2x *bp = fp->bp;
1108 int i;
1109
1110 /* ring wasn't allocated */
1111 if (fp->rx_buf_ring == NULL)
1112 return;
1113
1114 for (i = 0; i < NUM_RX_BD; i++) {
1115 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1116 struct sk_buff *skb = rx_buf->skb;
1117
1118 if (skb == NULL)
1119 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001120 dma_unmap_single(&bp->pdev->dev,
1121 dma_unmap_addr(rx_buf, mapping),
1122 fp->rx_buf_size, DMA_FROM_DEVICE);
1123
1124 rx_buf->skb = NULL;
1125 dev_kfree_skb(skb);
1126 }
1127}
1128
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001129static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1130{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001131 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001132
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001133 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001134 struct bnx2x_fastpath *fp = &bp->fp[j];
1135
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001136 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001137
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001138 if (!fp->disable_tpa)
1139 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1140 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001141 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001142 }
1143}
1144
1145void bnx2x_free_skbs(struct bnx2x *bp)
1146{
1147 bnx2x_free_tx_skbs(bp);
1148 bnx2x_free_rx_skbs(bp);
1149}
1150
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001151void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1152{
1153 /* load old values */
1154 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1155
1156 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1157 /* leave all but MAX value */
1158 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1159
1160 /* set new MAX value */
1161 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1162 & FUNC_MF_CFG_MAX_BW_MASK;
1163
1164 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1165 }
1166}
1167
Dmitry Kravkovca924292011-06-14 01:33:08 +00001168/**
1169 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1170 *
1171 * @bp: driver handle
1172 * @nvecs: number of vectors to be released
1173 */
1174static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001175{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001176 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001177
Dmitry Kravkovca924292011-06-14 01:33:08 +00001178 if (nvecs == offset)
1179 return;
1180 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001181 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001182 bp->msix_table[offset].vector);
1183 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001184#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001185 if (nvecs == offset)
1186 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001187 offset++;
1188#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001189
Dmitry Kravkovca924292011-06-14 01:33:08 +00001190 for_each_eth_queue(bp, i) {
1191 if (nvecs == offset)
1192 return;
1193 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1194 "irq\n", i, bp->msix_table[offset].vector);
1195
1196 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001197 }
1198}
1199
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001200void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001201{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001202 if (bp->flags & USING_MSIX_FLAG)
Dmitry Kravkovca924292011-06-14 01:33:08 +00001203 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001204 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001205 else if (bp->flags & USING_MSI_FLAG)
1206 free_irq(bp->pdev->irq, bp->dev);
1207 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001208 free_irq(bp->pdev->irq, bp->dev);
1209}
1210
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001211int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001212{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001213 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001214
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001215 bp->msix_table[msix_vec].entry = msix_vec;
1216 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1217 bp->msix_table[0].entry);
1218 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001219
1220#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001221 bp->msix_table[msix_vec].entry = msix_vec;
1222 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1223 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1224 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001225#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001226 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001227 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001228 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001229 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001230 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1231 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001232 }
1233
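	/*
	 * Resulting MSI-X table layout (sketch, derived from the loop above):
	 * entry 0 is the slowpath vector, entry 1 is the CNIC vector when
	 * BCM_CNIC is defined, and the remaining entries map one-to-one to
	 * the ETH fastpath queues.
	 */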
Ariel Elior6383c0b2011-07-14 08:31:57 +00001234 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001235
1236 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001237
1238 /*
1239 * reconfigure number of tx/rx queues according to available
1240 * MSI-X vectors
1241 */
1242 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001243 /* how many fewer vectors will we have? */
1244 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001245
1246 DP(NETIF_MSG_IFUP,
1247 "Trying to use less MSI-X vectors: %d\n", rc);
1248
1249 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1250
1251 if (rc) {
1252 DP(NETIF_MSG_IFUP,
1253 "MSI-X is not attainable rc %d\n", rc);
1254 return rc;
1255 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001256 /*
1257 * decrease number of queues by number of unallocated entries
1258 */
1259 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001260
1261 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1262 bp->num_queues);
1263 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001264 /* fall to INTx if not enough memory */
1265 if (rc == -ENOMEM)
1266 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001267 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1268 return rc;
1269 }
1270
1271 bp->flags |= USING_MSIX_FLAG;
1272
1273 return 0;
1274}
1275
1276static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1277{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001278 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001279
Dmitry Kravkovca924292011-06-14 01:33:08 +00001280 rc = request_irq(bp->msix_table[offset++].vector,
1281 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001282 bp->dev->name, bp->dev);
1283 if (rc) {
1284 BNX2X_ERR("request sp irq failed\n");
1285 return -EBUSY;
1286 }
1287
1288#ifdef BCM_CNIC
1289 offset++;
1290#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001291 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001292 struct bnx2x_fastpath *fp = &bp->fp[i];
1293 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1294 bp->dev->name, i);
1295
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001296 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001297 bnx2x_msix_fp_int, 0, fp->name, fp);
1298 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001299 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1300 bp->msix_table[offset].vector, rc);
1301 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001302 return -EBUSY;
1303 }
1304
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001305 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001306 }
1307
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001308 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001309 offset = 1 + CNIC_PRESENT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001310 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1311 " ... fp[%d] %d\n",
1312 bp->msix_table[0].vector,
1313 0, bp->msix_table[offset].vector,
1314 i - 1, bp->msix_table[offset + i - 1].vector);
1315
1316 return 0;
1317}
1318
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001319int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001320{
1321 int rc;
1322
1323 rc = pci_enable_msi(bp->pdev);
1324 if (rc) {
1325 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1326 return -1;
1327 }
1328 bp->flags |= USING_MSI_FLAG;
1329
1330 return 0;
1331}
1332
1333static int bnx2x_req_irq(struct bnx2x *bp)
1334{
1335 unsigned long flags;
1336 int rc;
1337
1338 if (bp->flags & USING_MSI_FLAG)
1339 flags = 0;
1340 else
1341 flags = IRQF_SHARED;
1342
1343 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1344 bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001345 return rc;
1346}
1347
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001348static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1349{
1350 int rc = 0;
1351 if (bp->flags & USING_MSIX_FLAG) {
1352 rc = bnx2x_req_msix_irqs(bp);
1353 if (rc)
1354 return rc;
1355 } else {
1356 bnx2x_ack_int(bp);
1357 rc = bnx2x_req_irq(bp);
1358 if (rc) {
1359 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1360 return rc;
1361 }
1362 if (bp->flags & USING_MSI_FLAG) {
1363 bp->dev->irq = bp->pdev->irq;
1364 netdev_info(bp->dev, "using MSI IRQ %d\n",
1365 bp->pdev->irq);
1366 }
1367 }
1368
1369 return 0;
1370}
1371
1372static inline void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001373{
1374 int i;
1375
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001376 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001377 napi_enable(&bnx2x_fp(bp, i, napi));
1378}
1379
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001380static inline void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001381{
1382 int i;
1383
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001384 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001385 napi_disable(&bnx2x_fp(bp, i, napi));
1386}
1387
1388void bnx2x_netif_start(struct bnx2x *bp)
1389{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001390 if (netif_running(bp->dev)) {
1391 bnx2x_napi_enable(bp);
1392 bnx2x_int_enable(bp);
1393 if (bp->state == BNX2X_STATE_OPEN)
1394 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001395 }
1396}
1397
1398void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1399{
1400 bnx2x_int_disable_sync(bp, disable_hw);
1401 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001402}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001403
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001404u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1405{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001406 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001407#ifdef BCM_CNIC
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001408 if (NO_FCOE(bp))
1409 return skb_tx_hash(dev, skb);
1410 else {
1411 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1412 u16 ether_type = ntohs(hdr->h_proto);
1413
1414 /* Skip VLAN tag if present */
1415 if (ether_type == ETH_P_8021Q) {
1416 struct vlan_ethhdr *vhdr =
1417 (struct vlan_ethhdr *)skb->data;
1418
1419 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1420 }
1421
1422 /* If ethertype is FCoE or FIP - use FCoE ring */
1423 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001424 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001425 }
1426#endif
1427 /* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1428 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001429 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001430}
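
/*
 * In short: when CNIC/FCoE is compiled in and enabled, FCoE and FIP frames
 * are steered to the dedicated FCoE transmit ring, while all other traffic
 * is hashed over the ETH queues only, so the FCoE ring is never selected
 * for regular L2 transmits.
 */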
1431
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001432void bnx2x_set_num_queues(struct bnx2x *bp)
1433{
1434 switch (bp->multi_mode) {
1435 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001436 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001437 break;
1438 case ETH_RSS_MODE_REGULAR:
1439 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001440 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001441
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001442 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001443 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001444 break;
1445 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001446
1447 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001448 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001449}
1450
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001451static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1452{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001453 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001454
Ariel Elior6383c0b2011-07-14 08:31:57 +00001455 tx = MAX_TXQS_PER_COS * bp->max_cos;
1456 rx = BNX2X_NUM_ETH_QUEUES(bp);
1457
1458/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001459#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001460 if (!NO_FCOE(bp)) {
1461 rx += FCOE_PRESENT;
1462 tx += FCOE_PRESENT;
1463 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001464#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001465
1466 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1467 if (rc) {
1468 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1469 return rc;
1470 }
1471 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1472 if (rc) {
1473 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1474 return rc;
1475 }
1476
1477 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1478 tx, rx);
1479
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001480 return rc;
1481}
1482
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001483static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1484{
1485 int i;
1486
1487 for_each_queue(bp, i) {
1488 struct bnx2x_fastpath *fp = &bp->fp[i];
1489
1490 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1491 if (IS_FCOE_IDX(i))
1492 /*
1493			 * Although there are no IP frames expected to arrive on
1494			 * this ring, we still want to add an
1495 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1496 * overrun attack.
1497 */
1498 fp->rx_buf_size =
1499 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001500 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001501 else
1502 fp->rx_buf_size =
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001503 bp->dev->mtu + ETH_OVREHEAD +
1504 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001505 }
1506}
1507
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001508static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1509{
1510 int i;
1511 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1512 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1513
1514 /*
1515	 * Prepare the initial contents of the indirection table if RSS is
1516 * enabled
1517 */
1518 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1519 for (i = 0; i < sizeof(ind_table); i++)
1520 ind_table[i] =
1521 bp->fp->cl_id + (i % num_eth_queues);
1522 }
1523
1524 /*
1525 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1526	 * per-port, so if explicit configuration is needed, do it only
1527 * for a PMF.
1528 *
1529 * For 57712 and newer on the other hand it's a per-function
1530 * configuration.
1531 */
1532 return bnx2x_config_rss_pf(bp, ind_table,
1533 bp->port.pmf || !CHIP_IS_E1x(bp));
1534}
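/*
 * For illustration, assuming four ETH queues and a hypothetical
 * bp->fp->cl_id of 16, the loop above fills the indirection table
 * round-robin with the client IDs 16, 17, 18, 19, 16, 17, ... so the
 * RSS hash buckets are spread evenly across the ETH queues. The actual
 * base cl_id depends on the chip and function configuration.
 */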
1535
1536int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1537{
1538 struct bnx2x_config_rss_params params = {0};
1539 int i;
1540
1541	/* Although RSS is meaningless when there is a single HW queue, we
1542 * still need it enabled in order to have HW Rx hash generated.
1543 *
1544 * if (!is_eth_multi(bp))
1545 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1546 */
1547
1548 params.rss_obj = &bp->rss_conf_obj;
1549
1550 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1551
1552 /* RSS mode */
1553 switch (bp->multi_mode) {
1554 case ETH_RSS_MODE_DISABLED:
1555 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1556 break;
1557 case ETH_RSS_MODE_REGULAR:
1558 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1559 break;
1560 case ETH_RSS_MODE_VLAN_PRI:
1561 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1562 break;
1563 case ETH_RSS_MODE_E1HOV_PRI:
1564 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1565 break;
1566 case ETH_RSS_MODE_IP_DSCP:
1567 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1568 break;
1569 default:
1570 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1571 return -EINVAL;
1572 }
1573
1574 /* If RSS is enabled */
1575 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1576 /* RSS configuration */
1577 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1578 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1579 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1580 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1581
1582 /* Hash bits */
1583 params.rss_result_mask = MULTI_MASK;
1584
1585 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1586
1587 if (config_hash) {
1588 /* RSS keys */
1589 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1590 params.rss_key[i] = random32();
1591
1592 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1593 }
1594 }
1595
1596 return bnx2x_config_rss(bp, &params);
1597}
1598
1599static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1600{
1601 struct bnx2x_func_state_params func_params = {0};
1602
1603 /* Prepare parameters for function state transitions */
1604 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1605
1606 func_params.f_obj = &bp->func_obj;
1607 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1608
1609 func_params.params.hw_init.load_phase = load_code;
1610
1611 return bnx2x_func_state_change(bp, &func_params);
1612}
1613
1614/*
1615 * Cleans the objects that have internal lists without sending
1616 * ramrods. Should be run when interrupts are disabled.
1617 */
1618static void bnx2x_squeeze_objects(struct bnx2x *bp)
1619{
1620 int rc;
1621 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1622 struct bnx2x_mcast_ramrod_params rparam = {0};
1623 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1624
1625 /***************** Cleanup MACs' object first *************************/
1626
1627 /* Wait for completion of requested */
1628 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1629 /* Perform a dry cleanup */
1630 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1631
1632 /* Clean ETH primary MAC */
1633 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1634 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1635 &ramrod_flags);
1636 if (rc != 0)
1637 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1638
1639 /* Cleanup UC list */
1640 vlan_mac_flags = 0;
1641 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1642 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1643 &ramrod_flags);
1644 if (rc != 0)
1645 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1646
1647 /***************** Now clean mcast object *****************************/
1648 rparam.mcast_obj = &bp->mcast_obj;
1649 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1650
1651 /* Add a DEL command... */
1652 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1653 if (rc < 0)
1654 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1655 "object: %d\n", rc);
1656
1657 /* ...and wait until all pending commands are cleared */
1658 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1659 while (rc != 0) {
1660 if (rc < 0) {
1661 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1662 rc);
1663 return;
1664 }
1665
1666 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1667 }
1668}
1669
1670#ifndef BNX2X_STOP_ON_ERROR
1671#define LOAD_ERROR_EXIT(bp, label) \
1672 do { \
1673 (bp)->state = BNX2X_STATE_ERROR; \
1674 goto label; \
1675 } while (0)
1676#else
1677#define LOAD_ERROR_EXIT(bp, label) \
1678 do { \
1679 (bp)->state = BNX2X_STATE_ERROR; \
1680 (bp)->panic = 1; \
1681 return -EBUSY; \
1682 } while (0)
1683#endif
1684
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001685/* must be called with rtnl_lock */
1686int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1687{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001688 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001689 u32 load_code;
1690 int i, rc;
1691
1692#ifdef BNX2X_STOP_ON_ERROR
1693 if (unlikely(bp->panic))
1694 return -EPERM;
1695#endif
1696
1697 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1698
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001699 /* Set the initial link reported state to link down */
1700 bnx2x_acquire_phy_lock(bp);
1701 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1702 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1703 &bp->last_reported_link.link_report_flags);
1704 bnx2x_release_phy_lock(bp);
1705
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001706 /* must be called before memory allocation and HW init */
1707 bnx2x_ilt_set_info(bp);
1708
Ariel Elior6383c0b2011-07-14 08:31:57 +00001709 /*
1710	 * Zero fastpath structures while preserving fields that are initialized
1711	 * only once: the napi object, fp index, max_cos and the bp pointer.
1712 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001713 */
1714 for_each_queue(bp, i)
1715 bnx2x_bz_fp(bp, i);
1716
Ariel Elior6383c0b2011-07-14 08:31:57 +00001717
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001718 /* Set the receive queues buffer size */
1719 bnx2x_set_rx_buf_size(bp);
1720
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001721 if (bnx2x_alloc_mem(bp))
1722 return -ENOMEM;
1723
1724 /* As long as bnx2x_alloc_mem() may possibly update
1725 * bp->num_queues, bnx2x_set_real_num_queues() should always
1726 * come after it.
1727 */
1728 rc = bnx2x_set_real_num_queues(bp);
1729 if (rc) {
1730 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001731 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001732 }
1733
Ariel Elior6383c0b2011-07-14 08:31:57 +00001734 /* configure multi cos mappings in kernel.
1735	 * this configuration may be overridden by a multi class queue discipline
1736 * or by a dcbx negotiation result.
1737 */
1738 bnx2x_setup_tc(bp->dev, bp->max_cos);
1739
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001740 bnx2x_napi_enable(bp);
1741
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001742 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001743 * Returns the type of LOAD command:
1744 * if it is the first port to be initialized
1745 * common blocks should be initialized, otherwise - not
1746 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001747 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001748 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001749 if (!load_code) {
1750 BNX2X_ERR("MCP response failure, aborting\n");
1751 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001752 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001753 }
1754 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1755 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001756 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001757 }
1758
1759 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001760 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001761
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001762 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1763 path, load_count[path][0], load_count[path][1],
1764 load_count[path][2]);
1765 load_count[path][0]++;
1766 load_count[path][1 + port]++;
1767 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1768 path, load_count[path][0], load_count[path][1],
1769 load_count[path][2]);
1770 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001771 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001772 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001773 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1774 else
1775 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1776 }
1777
1778 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001779 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001780 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001781 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001782 /*
1783 * We need the barrier to ensure the ordering between the
1784 * writing to bp->port.pmf here and reading it from the
1785 * bnx2x_periodic_task().
1786 */
1787 smp_mb();
1788 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1789 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001790 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001791
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001792 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1793
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001794 /* Init Function state controlling object */
1795 bnx2x__init_func_obj(bp);
1796
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001797 /* Initialize HW */
1798 rc = bnx2x_init_hw(bp, load_code);
1799 if (rc) {
1800 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001801 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001802 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001803 }
1804
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001805 /* Connect to IRQs */
1806 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001807 if (rc) {
1808 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001809 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001810 }
1811
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001812 /* Setup NIC internals and enable interrupts */
1813 bnx2x_nic_init(bp, load_code);
1814
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001815 /* Init per-function objects */
1816 bnx2x_init_bp_objs(bp);
1817
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001818 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1819 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001820 (bp->common.shmem2_base)) {
1821 if (SHMEM2_HAS(bp, dcc_support))
1822 SHMEM2_WR(bp, dcc_support,
1823 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1824 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1825 }
1826
1827 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1828 rc = bnx2x_func_start(bp);
1829 if (rc) {
1830 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00001831 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001832 LOAD_ERROR_EXIT(bp, load_error3);
1833 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001834
1835 /* Send LOAD_DONE command to MCP */
1836 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001837 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001838 if (!load_code) {
1839 BNX2X_ERR("MCP response failure, aborting\n");
1840 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001841 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001842 }
1843 }
1844
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001845 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001846 if (rc) {
1847 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001848 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001849 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001850
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001851#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001852 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001853 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001854#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001855
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001856 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001857 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001858 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001859 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001860 }
1861
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001862 rc = bnx2x_init_rss_pf(bp);
1863 if (rc)
1864 LOAD_ERROR_EXIT(bp, load_error4);
1865
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001866 /* Now when Clients are configured we are ready to work */
1867 bp->state = BNX2X_STATE_OPEN;
1868
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001869 /* Configure a ucast MAC */
1870 rc = bnx2x_set_eth_mac(bp, true);
1871 if (rc)
1872 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001873
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001874 if (bp->pending_max) {
1875 bnx2x_update_max_mf_config(bp, bp->pending_max);
1876 bp->pending_max = 0;
1877 }
1878
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001879 if (bp->port.pmf)
1880 bnx2x_initial_phy_init(bp, load_mode);
1881
1882 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001883
1884 /* Initialize Rx filter. */
1885 netif_addr_lock_bh(bp->dev);
1886 bnx2x_set_rx_mode(bp->dev);
1887 netif_addr_unlock_bh(bp->dev);
1888
1889 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001890 switch (load_mode) {
1891 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001892		/* Tx queues should only be re-enabled */
1893 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001894 break;
1895
1896 case LOAD_OPEN:
1897 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001898 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001899 break;
1900
1901 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001902 bp->state = BNX2X_STATE_DIAG;
1903 break;
1904
1905 default:
1906 break;
1907 }
1908
1909 if (!bp->port.pmf)
1910 bnx2x__link_status_update(bp);
1911
1912 /* start the timer */
1913 mod_timer(&bp->timer, jiffies + bp->current_interval);
1914
1915#ifdef BCM_CNIC
1916 bnx2x_setup_cnic_irq_info(bp);
1917 if (bp->state == BNX2X_STATE_OPEN)
1918 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1919#endif
1920 bnx2x_inc_load_cnt(bp);
1921
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001922 /* Wait for all pending SP commands to complete */
1923 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1924 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1925 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1926 return -EBUSY;
1927 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001928
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001929 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001930 return 0;
1931
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001932#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001933load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001934#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001935 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001936 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001937#endif
1938load_error3:
1939 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001940
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001941 /* Clean queueable objects */
1942 bnx2x_squeeze_objects(bp);
1943
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001944 /* Free SKBs, SGEs, TPA pool and driver internals */
1945 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001946 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001947 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001948
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001949 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001950 bnx2x_free_irq(bp);
1951load_error2:
1952 if (!BP_NOMCP(bp)) {
1953 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1954 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1955 }
1956
1957 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001958load_error1:
1959 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001960load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001961 bnx2x_free_mem(bp);
1962
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001963 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001964#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001965}
1966
1967/* must be called with rtnl_lock */
1968int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1969{
1970 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001971 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001972
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001973 if ((bp->state == BNX2X_STATE_CLOSED) ||
1974 (bp->state == BNX2X_STATE_ERROR)) {
1975 /* We can get here if the driver has been unloaded
1976 * during parity error recovery and is either waiting for a
1977 * leader to complete or for other functions to unload and
1978 * then ifdown has been issued. In this case we want to
1979		 * unload and let other functions complete a recovery
1980 * process.
1981 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001982 bp->recovery_state = BNX2X_RECOVERY_DONE;
1983 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001984 bnx2x_release_leader_lock(bp);
1985 smp_mb();
1986
1987 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001988
1989 return -EINVAL;
1990 }
1991
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00001992 /* Stop Tx */
1993 bnx2x_tx_disable(bp);
1994
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001995#ifdef BCM_CNIC
1996 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1997#endif
1998 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001999 smp_mb();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002000
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002001 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002002
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002003 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002004
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002005 /* Set ALWAYS_ALIVE bit in shmem */
2006 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2007
2008 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002009
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002010 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002011
2012 /* Cleanup the chip if needed */
2013 if (unload_mode != UNLOAD_RECOVERY)
2014 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002015 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002016 /* Send the UNLOAD_REQUEST to the MCP */
2017 bnx2x_send_unload_req(bp, unload_mode);
2018
2019 /*
2020 * Prevent transactions to host from the functions on the
2021 * engine that doesn't reset global blocks in case of global
2022		 * attention once global blocks are reset and gates are opened
2023		 * (the engine whose leader will perform the recovery
2024 * last).
2025 */
2026 if (!CHIP_IS_E1x(bp))
2027 bnx2x_pf_disable(bp);
2028
2029 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002030 bnx2x_netif_stop(bp, 1);
2031
2032 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002033 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002034
2035 /* Report UNLOAD_DONE to MCP */
2036 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002037 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002038
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002039 /*
2040	 * At this stage no more interrupts will arrive, so we may safely clean
2041 * the queueable objects here in case they failed to get cleaned so far.
2042 */
2043 bnx2x_squeeze_objects(bp);
2044
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002045 bp->port.pmf = 0;
2046
2047 /* Free SKBs, SGEs, TPA pool and driver internals */
2048 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002049 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002050 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002051
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002052 bnx2x_free_mem(bp);
2053
2054 bp->state = BNX2X_STATE_CLOSED;
2055
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002056 /* Check if there are pending parity attentions. If there are - set
2057 * RECOVERY_IN_PROGRESS.
2058 */
2059 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2060 bnx2x_set_reset_in_progress(bp);
2061
2062 /* Set RESET_IS_GLOBAL if needed */
2063 if (global)
2064 bnx2x_set_reset_global(bp);
2065 }
2066
2067
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002068 /* The last driver must disable a "close the gate" if there is no
2069 * parity attention or "process kill" pending.
2070 */
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002071 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002072 bnx2x_disable_close_the_gate(bp);
2073
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002074 return 0;
2075}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002076
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002077int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2078{
2079 u16 pmcsr;
2080
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002081 /* If there is no power capability, silently succeed */
2082 if (!bp->pm_cap) {
2083 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2084 return 0;
2085 }
2086
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002087 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2088
2089 switch (state) {
2090 case PCI_D0:
2091 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2092 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2093 PCI_PM_CTRL_PME_STATUS));
2094
2095 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2096 /* delay required during transition out of D3hot */
2097 msleep(20);
2098 break;
2099
2100 case PCI_D3hot:
2101		/* If there are other clients above, don't
2102 shut down the power */
2103 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2104 return 0;
2105 /* Don't shut down the power for emulation and FPGA */
2106 if (CHIP_REV_IS_SLOW(bp))
2107 return 0;
2108
2109 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2110 pmcsr |= 3;
2111
2112 if (bp->wol)
2113 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2114
2115 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2116 pmcsr);
2117
2118 /* No more memory access after this point until
2119 * device is brought back to D0.
2120 */
2121 break;
2122
2123 default:
2124 return -EINVAL;
2125 }
2126 return 0;
2127}
2128
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002129/*
2130 * net_device service functions
2131 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002132int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002133{
2134 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002135 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002136 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2137 napi);
2138 struct bnx2x *bp = fp->bp;
2139
2140 while (1) {
2141#ifdef BNX2X_STOP_ON_ERROR
2142 if (unlikely(bp->panic)) {
2143 napi_complete(napi);
2144 return 0;
2145 }
2146#endif
2147
Ariel Elior6383c0b2011-07-14 08:31:57 +00002148 for_each_cos_in_tx_queue(fp, cos)
2149 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2150 bnx2x_tx_int(bp, &fp->txdata[cos]);
2151
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002152
2153 if (bnx2x_has_rx_work(fp)) {
2154 work_done += bnx2x_rx_int(fp, budget - work_done);
2155
2156 /* must not complete if we consumed full budget */
2157 if (work_done >= budget)
2158 break;
2159 }
2160
2161 /* Fall out from the NAPI loop if needed */
2162 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002163#ifdef BCM_CNIC
2164 /* No need to update SB for FCoE L2 ring as long as
2165 * it's connected to the default SB and the SB
2166 * has been updated when NAPI was scheduled.
2167 */
2168 if (IS_FCOE_FP(fp)) {
2169 napi_complete(napi);
2170 break;
2171 }
2172#endif
2173
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002174 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002175 /* bnx2x_has_rx_work() reads the status block,
2176 * thus we need to ensure that status block indices
2177 * have been actually read (bnx2x_update_fpsb_idx)
2178 * prior to this check (bnx2x_has_rx_work) so that
2179 * we won't write the "newer" value of the status block
2180 * to IGU (if there was a DMA right after
2181 * bnx2x_has_rx_work and if there is no rmb, the memory
2182 * reading (bnx2x_update_fpsb_idx) may be postponed
2183 * to right before bnx2x_ack_sb). In this case there
2184 * will never be another interrupt until there is
2185 * another update of the status block, while there
2186 * is still unhandled work.
2187 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002188 rmb();
2189
2190 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2191 napi_complete(napi);
2192 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002193 DP(NETIF_MSG_HW,
2194 "Update index to %d\n", fp->fp_hc_idx);
2195 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2196 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002197 IGU_INT_ENABLE, 1);
2198 break;
2199 }
2200 }
2201 }
2202
2203 return work_done;
2204}
2205
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002206/* we split the first BD into headers and data BDs
2207 * to ease the pain of our fellow microcode engineers
2208 * we use one mapping for both BDs
2209 * So far this has only been observed to happen
2210 * in Other Operating Systems(TM)
2211 */
2212static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002213 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002214 struct sw_tx_bd *tx_buf,
2215 struct eth_tx_start_bd **tx_bd, u16 hlen,
2216 u16 bd_prod, int nbd)
2217{
2218 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2219 struct eth_tx_bd *d_tx_bd;
2220 dma_addr_t mapping;
2221 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2222
2223 /* first fix first BD */
2224 h_tx_bd->nbd = cpu_to_le16(nbd);
2225 h_tx_bd->nbytes = cpu_to_le16(hlen);
2226
2227 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2228 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2229 h_tx_bd->addr_lo, h_tx_bd->nbd);
2230
2231 /* now get a new data BD
2232 * (after the pbd) and fill it */
2233 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002234 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002235
2236 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2237 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2238
2239 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2240 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2241 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2242
2243 /* this marks the BD as one that has no individual mapping */
2244 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2245
2246 DP(NETIF_MSG_TX_QUEUED,
2247 "TSO split data size is %d (%x:%x)\n",
2248 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2249
2250 /* update tx_bd */
2251 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2252
2253 return bd_prod;
2254}
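/*
 * Sketch of the split above with made-up sizes: for a TSO skb whose
 * linear part is 200 bytes and whose headers take hlen == 66 bytes, the
 * start BD is trimmed to the 66 header bytes, a new data BD is chained
 * after the PBD pointing at mapping + 66 with nbytes == 134, and both
 * BDs share the single DMA mapping of the linear data (the
 * BNX2X_TSO_SPLIT_BD flag records that the data BD has no mapping of
 * its own to unmap later).
 */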
2255
2256static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2257{
2258 if (fix > 0)
2259 csum = (u16) ~csum_fold(csum_sub(csum,
2260 csum_partial(t_header - fix, fix, 0)));
2261
2262 else if (fix < 0)
2263 csum = (u16) ~csum_fold(csum_add(csum,
2264 csum_partial(t_header, -fix, 0)));
2265
2266 return swab16(csum);
2267}
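/*
 * The arithmetic above: for fix > 0 the sum of the "fix" bytes just
 * before t_header is subtracted from csum, for fix < 0 the sum of the
 * first -fix bytes at t_header is added to it; the adjusted sum is then
 * folded to 16 bits and byte-swapped into the format the parse BD
 * expects. This is the workaround applied by bnx2x_set_pbd_csum() for
 * the HW checksum bug noted there.
 */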
2268
2269static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2270{
2271 u32 rc;
2272
2273 if (skb->ip_summed != CHECKSUM_PARTIAL)
2274 rc = XMIT_PLAIN;
2275
2276 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002277 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002278 rc = XMIT_CSUM_V6;
2279 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2280 rc |= XMIT_CSUM_TCP;
2281
2282 } else {
2283 rc = XMIT_CSUM_V4;
2284 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2285 rc |= XMIT_CSUM_TCP;
2286 }
2287 }
2288
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002289 if (skb_is_gso_v6(skb))
2290 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2291 else if (skb_is_gso(skb))
2292 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002293
2294 return rc;
2295}
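/*
 * Examples of the combinations returned above: a CHECKSUM_PARTIAL
 * TCP/IPv4 GSO skb yields XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP,
 * a TCP/IPv6 GSO skb yields XMIT_GSO_V6 | XMIT_CSUM_V6 | XMIT_CSUM_TCP,
 * and a packet with no checksum offload requested is simply XMIT_PLAIN.
 */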
2296
2297#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2298/* check if packet requires linearization (packet is too fragmented)
2299 no need to check fragmentation if page size > 8K (there will be no
2300   violation of FW restrictions) */
2301static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2302 u32 xmit_type)
2303{
2304 int to_copy = 0;
2305 int hlen = 0;
2306 int first_bd_sz = 0;
2307
2308 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2309 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2310
2311 if (xmit_type & XMIT_GSO) {
2312 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2313 /* Check if LSO packet needs to be copied:
2314 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2315 int wnd_size = MAX_FETCH_BD - 3;
2316 /* Number of windows to check */
2317 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2318 int wnd_idx = 0;
2319 int frag_idx = 0;
2320 u32 wnd_sum = 0;
2321
2322 /* Headers length */
2323 hlen = (int)(skb_transport_header(skb) - skb->data) +
2324 tcp_hdrlen(skb);
2325
2326 /* Amount of data (w/o headers) on linear part of SKB*/
2327 first_bd_sz = skb_headlen(skb) - hlen;
2328
2329 wnd_sum = first_bd_sz;
2330
2331 /* Calculate the first sum - it's special */
2332 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2333 wnd_sum +=
2334 skb_shinfo(skb)->frags[frag_idx].size;
2335
2336 /* If there was data on linear skb data - check it */
2337 if (first_bd_sz > 0) {
2338 if (unlikely(wnd_sum < lso_mss)) {
2339 to_copy = 1;
2340 goto exit_lbl;
2341 }
2342
2343 wnd_sum -= first_bd_sz;
2344 }
2345
2346 /* Others are easier: run through the frag list and
2347 check all windows */
2348 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2349 wnd_sum +=
2350 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2351
2352 if (unlikely(wnd_sum < lso_mss)) {
2353 to_copy = 1;
2354 break;
2355 }
2356 wnd_sum -=
2357 skb_shinfo(skb)->frags[wnd_idx].size;
2358 }
2359 } else {
2360			/* a non-LSO packet that is too fragmented should
2361			   always be linearized */
2362 to_copy = 1;
2363 }
2364 }
2365
2366exit_lbl:
2367 if (unlikely(to_copy))
2368 DP(NETIF_MSG_TX_QUEUED,
2369 "Linearization IS REQUIRED for %s packet. "
2370 "num_frags %d hlen %d first_bd_sz %d\n",
2371 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2372 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2373
2374 return to_copy;
2375}
2376#endif
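/*
 * Sketch of the window check with made-up numbers: assuming a
 * MAX_FETCH_BD of 13, the window size is 10 BDs, so an LSO skb with 12
 * frags is checked as linear data + frags 0-8, then frags 0-9, 1-10 and
 * 2-11; linearization is requested as soon as any such window carries
 * fewer bytes than gso_size, which is the FW restriction this helper
 * guards against.
 */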
2377
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002378static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2379 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002380{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002381 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2382 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2383 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002384 if ((xmit_type & XMIT_GSO_V6) &&
2385 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002386 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002387}
2388
2389/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002390 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002391 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002392 * @skb: packet skb
2393 * @pbd: parse BD
2394 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002395 */
2396static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2397 struct eth_tx_parse_bd_e1x *pbd,
2398 u32 xmit_type)
2399{
2400 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2401 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2402 pbd->tcp_flags = pbd_tcp_flags(skb);
2403
2404 if (xmit_type & XMIT_GSO_V4) {
2405 pbd->ip_id = swab16(ip_hdr(skb)->id);
2406 pbd->tcp_pseudo_csum =
2407 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2408 ip_hdr(skb)->daddr,
2409 0, IPPROTO_TCP, 0));
2410
2411 } else
2412 pbd->tcp_pseudo_csum =
2413 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2414 &ipv6_hdr(skb)->daddr,
2415 0, IPPROTO_TCP, 0));
2416
2417 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2418}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002419
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002420/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002421 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002422 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002423 * @bp: driver handle
2424 * @skb: packet skb
2425 * @parsing_data: data to be updated
2426 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002427 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002428 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002429 */
2430static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002431 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002432{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002433 *parsing_data |=
2434 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2435 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2436 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002437
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002438 if (xmit_type & XMIT_CSUM_TCP) {
2439 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2440 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2441 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002442
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002443 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2444 } else
2445 /* We support checksum offload for TCP and UDP only.
2446 * No need to pass the UDP header length - it's a constant.
2447 */
2448 return skb_transport_header(skb) +
2449 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002450}
2451
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002452static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2453 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2454{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002455 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2456
2457 if (xmit_type & XMIT_CSUM_V4)
2458 tx_start_bd->bd_flags.as_bitfield |=
2459 ETH_TX_BD_FLAGS_IP_CSUM;
2460 else
2461 tx_start_bd->bd_flags.as_bitfield |=
2462 ETH_TX_BD_FLAGS_IPV6;
2463
2464 if (!(xmit_type & XMIT_CSUM_TCP))
2465 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002466}
2467
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002468/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002469 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002470 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002471 * @bp: driver handle
2472 * @skb: packet skb
2473 * @pbd: parse BD to be updated
2474 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002475 */
2476static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2477 struct eth_tx_parse_bd_e1x *pbd,
2478 u32 xmit_type)
2479{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002480 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002481
2482 /* for now NS flag is not used in Linux */
2483 pbd->global_data =
2484 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2485 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2486
2487 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002488 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002489
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002490 hlen += pbd->ip_hlen_w;
2491
2492 /* We support checksum offload for TCP and UDP only */
2493 if (xmit_type & XMIT_CSUM_TCP)
2494 hlen += tcp_hdrlen(skb) / 2;
2495 else
2496 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002497
2498 pbd->total_hlen_w = cpu_to_le16(hlen);
2499 hlen = hlen*2;
2500
2501 if (xmit_type & XMIT_CSUM_TCP) {
2502 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2503
2504 } else {
2505 s8 fix = SKB_CS_OFF(skb); /* signed! */
2506
2507 DP(NETIF_MSG_TX_QUEUED,
2508 "hlen %d fix %d csum before fix %x\n",
2509 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2510
2511 /* HW bug: fixup the CSUM */
2512 pbd->tcp_pseudo_csum =
2513 bnx2x_csum_fix(skb_transport_header(skb),
2514 SKB_CS(skb), fix);
2515
2516 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2517 pbd->tcp_pseudo_csum);
2518 }
2519
2520 return hlen;
2521}
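/*
 * Worked example, assuming an untagged TCP/IPv4 frame with no IP or TCP
 * options: the Ethernet header contributes 7 words, ip_hlen_w is 10 and
 * the TCP header adds another 10, so total_hlen_w is written as 27 and
 * the function returns 54 - the header length in bytes that the caller
 * may later feed to bnx2x_tx_split() for TSO.
 */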
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002522
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002523/* called with netif_tx_lock
2524 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2525 * netif_wake_queue()
2526 */
2527netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2528{
2529 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002530
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002531 struct bnx2x_fastpath *fp;
2532 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002533 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002534 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002535 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002536 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002537 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002538 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002539 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002540 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002541 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002542 dma_addr_t mapping;
2543 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2544 int i;
2545 u8 hlen = 0;
2546 __le16 pkt_size = 0;
2547 struct ethhdr *eth;
2548 u8 mac_type = UNICAST_ADDRESS;
2549
2550#ifdef BNX2X_STOP_ON_ERROR
2551 if (unlikely(bp->panic))
2552 return NETDEV_TX_BUSY;
2553#endif
2554
Ariel Elior6383c0b2011-07-14 08:31:57 +00002555 txq_index = skb_get_queue_mapping(skb);
2556 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002557
Ariel Elior6383c0b2011-07-14 08:31:57 +00002558 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2559
2560 /* decode the fastpath index and the cos index from the txq */
2561 fp_index = TXQ_TO_FP(txq_index);
2562 txdata_index = TXQ_TO_COS(txq_index);
2563
2564#ifdef BCM_CNIC
2565 /*
2566 * Override the above for the FCoE queue:
2567 * - FCoE fp entry is right after the ETH entries.
2568 * - FCoE L2 queue uses bp->txdata[0] only.
2569 */
2570 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2571 bnx2x_fcoe_tx(bp, txq_index)))) {
2572 fp_index = FCOE_IDX;
2573 txdata_index = 0;
2574 }
2575#endif
2576
2577 /* enable this debug print to view the transmission queue being used
2578 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
2579 txq_index, fp_index, txdata_index); */
2580
2581 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002582 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002583 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002584
Ariel Elior6383c0b2011-07-14 08:31:57 +00002585	/* enable this debug print to view the transmission details
2586 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2587 " tx_data ptr %p fp pointer %p",
2588 txdata->cid, fp_index, txdata_index, txdata, fp); */
2589
2590 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2591 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002592 fp->eth_q_stats.driver_xoff++;
2593 netif_tx_stop_queue(txq);
2594 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2595 return NETDEV_TX_BUSY;
2596 }
2597
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002598 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2599 "protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002600 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002601 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2602
2603 eth = (struct ethhdr *)skb->data;
2604
2605 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2606 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2607 if (is_broadcast_ether_addr(eth->h_dest))
2608 mac_type = BROADCAST_ADDRESS;
2609 else
2610 mac_type = MULTICAST_ADDRESS;
2611 }
2612
2613#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2614 /* First, check if we need to linearize the skb (due to FW
2615 restrictions). No need to check fragmentation if page size > 8K
2616 (there will be no violation to FW restrictions) */
2617 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2618 /* Statistics of linearization */
2619 bp->lin_cnt++;
2620 if (skb_linearize(skb) != 0) {
2621 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2622 "silently dropping this SKB\n");
2623 dev_kfree_skb_any(skb);
2624 return NETDEV_TX_OK;
2625 }
2626 }
2627#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002628 /* Map skb linear data for DMA */
2629 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2630 skb_headlen(skb), DMA_TO_DEVICE);
2631 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2632 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2633 "silently dropping this SKB\n");
2634 dev_kfree_skb_any(skb);
2635 return NETDEV_TX_OK;
2636 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002637 /*
2638 Please read carefully. First we use one BD which we mark as start,
2639 then we have a parsing info BD (used for TSO or xsum),
2640 and only then we have the rest of the TSO BDs.
2641 (don't forget to mark the last one as last,
2642 and to unmap only AFTER you write to the BD ...)
2643	   And above all, all pbd sizes are in words - NOT DWORDS!
2644 */
2645
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002646 /* get current pkt produced now - advance it just before sending packet
2647 * since mapping of pages may fail and cause packet to be dropped
2648 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002649 pkt_prod = txdata->tx_pkt_prod;
2650 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002651
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002652 /* get a tx_buf and first BD
2653 * tx_start_bd may be changed during SPLIT,
2654 * but first_bd will always stay first
2655 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002656 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2657 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002658 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002659
2660 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002661 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2662 mac_type);
2663
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002664 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002665 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002666
2667 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002668 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002669 tx_buf->skb = skb;
2670 tx_buf->flags = 0;
2671
2672 DP(NETIF_MSG_TX_QUEUED,
2673 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002674 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002675
Jesse Grosseab6d182010-10-20 13:56:03 +00002676 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002677 tx_start_bd->vlan_or_ethertype =
2678 cpu_to_le16(vlan_tx_tag_get(skb));
2679 tx_start_bd->bd_flags.as_bitfield |=
2680 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002681 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002682 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002683
2684 /* turn on parsing and get a BD */
2685 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002686
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002687 if (xmit_type & XMIT_CSUM)
2688 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002689
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002690 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002691 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002692 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2693 /* Set PBD in checksum offload case */
2694 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002695 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2696 &pbd_e2_parsing_data,
2697 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002698 if (IS_MF_SI(bp)) {
2699 /*
2700 * fill in the MAC addresses in the PBD - for local
2701 * switching
2702 */
2703 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2704 &pbd_e2->src_mac_addr_mid,
2705 &pbd_e2->src_mac_addr_lo,
2706 eth->h_source);
2707 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2708 &pbd_e2->dst_mac_addr_mid,
2709 &pbd_e2->dst_mac_addr_lo,
2710 eth->h_dest);
2711 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002712 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002713 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002714 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2715 /* Set PBD in checksum offload case */
2716 if (xmit_type & XMIT_CSUM)
2717 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002718
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002719 }
2720
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002721 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002722 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2723 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002724 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002725 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2726 pkt_size = tx_start_bd->nbytes;
2727
2728 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2729 " nbytes %d flags %x vlan %x\n",
2730 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2731 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002732 tx_start_bd->bd_flags.as_bitfield,
2733 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002734
2735 if (xmit_type & XMIT_GSO) {
2736
2737 DP(NETIF_MSG_TX_QUEUED,
2738 "TSO packet len %d hlen %d total len %d tso size %d\n",
2739 skb->len, hlen, skb_headlen(skb),
2740 skb_shinfo(skb)->gso_size);
2741
2742 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2743
2744 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00002745 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2746 &tx_start_bd, hlen,
2747 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002748 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002749 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2750 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002751 else
2752 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002753 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002754
2755 /* Set the PBD's parsing_data field if not zero
2756 * (for the chips newer than 57711).
2757 */
2758 if (pbd_e2_parsing_data)
2759 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2760
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002761 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2762
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002763 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002764 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2765 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2766
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002767 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2768 frag->page_offset, frag->size,
2769 DMA_TO_DEVICE);
2770 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2771
2772 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2773 "dropping packet...\n");
2774
2775 /* we need unmap all buffers already mapped
2776 * for this SKB;
2777 * first_bd->nbd need to be properly updated
2778 * before call to bnx2x_free_tx_pkt
2779 */
2780 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002781 bnx2x_free_tx_pkt(bp, txdata,
2782 TX_BD(txdata->tx_pkt_prod));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002783 return NETDEV_TX_OK;
2784 }
2785
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002786 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002787 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002788 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00002789 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002790
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002791 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2792 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2793 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2794 le16_add_cpu(&pkt_size, frag->size);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002795 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002796
2797 DP(NETIF_MSG_TX_QUEUED,
2798 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2799 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2800 le16_to_cpu(tx_data_bd->nbytes));
2801 }
2802
2803 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2804
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002805 /* update with actual num BDs */
2806 first_bd->nbd = cpu_to_le16(nbd);
2807
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002808 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2809
2810 /* now send a tx doorbell, counting the next BD
2811 * if the packet contains or ends with it
2812 */
2813 if (TX_BD_POFF(bd_prod) < nbd)
2814 nbd++;
2815
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002816 /* total_pkt_bytes should be set on the first data BD if
2817 * it's not an LSO packet and there is more than one
2818 * data BD. In this case pkt_size is limited by an MTU value.
2819 * However we prefer to set it for an LSO packet (while we don't
2820	 * have to) in order to save some CPU cycles in the non-LSO
2821	 * case, where we care about them much more.
2822 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002823 if (total_pkt_bd != NULL)
2824 total_pkt_bd->total_pkt_bytes = pkt_size;
2825
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002826 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002827 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002828 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002829 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002830 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2831 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2832 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2833 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002834 if (pbd_e2)
2835 DP(NETIF_MSG_TX_QUEUED,
2836 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2837 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2838 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2839 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2840 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002841 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2842
Ariel Elior6383c0b2011-07-14 08:31:57 +00002843 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002844 /*
2845 * Make sure that the BD data is updated before updating the producer
2846 * since FW might read the BD right after the producer is updated.
2847 * This is only applicable for weak-ordered memory model archs such
2848 * as IA-64. The following barrier is also mandatory since the FW
2849 * assumes packets always have BDs.
2850 */
2851 wmb();
2852
Ariel Elior6383c0b2011-07-14 08:31:57 +00002853 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002854 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002855
Ariel Elior6383c0b2011-07-14 08:31:57 +00002856 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002857
2858 mmiowb();
2859
Ariel Elior6383c0b2011-07-14 08:31:57 +00002860 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002861
Ariel Elior6383c0b2011-07-14 08:31:57 +00002862 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002863 netif_tx_stop_queue(txq);
2864
2865 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
2866 * the ordering of set_bit() in netif_tx_stop_queue() and the read
2867 * of fp->bd_tx_cons */
2868 smp_mb();
2869
2870 fp->eth_q_stats.driver_xoff++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002871 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002872 netif_tx_wake_queue(txq);
2873 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002874 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002875
2876 return NETDEV_TX_OK;
2877}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002878
Ariel Elior6383c0b2011-07-14 08:31:57 +00002879/**
2880 * bnx2x_setup_tc - routine to configure net_device for multi tc
2881 *
2882 * @dev: net device to configure
2883 * @num_tc: number of traffic classes to enable
2884 *
2885 * callback connected to the ndo_setup_tc function pointer
2886 */
2887int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2888{
2889 int cos, prio, count, offset;
2890 struct bnx2x *bp = netdev_priv(dev);
2891
2892 /* setup tc must be called under rtnl lock */
2893 ASSERT_RTNL();
2894
2895 /* no traffic classes requested. aborting */
2896 if (!num_tc) {
2897 netdev_reset_tc(dev);
2898 return 0;
2899 }
2900
2901 /* requested to support too many traffic classes */
2902 if (num_tc > bp->max_cos) {
2903 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2904 " requested: %d. max supported is %d",
2905 num_tc, bp->max_cos);
2906 return -EINVAL;
2907 }
2908
2909 /* declare amount of supported traffic classes */
2910 if (netdev_set_num_tc(dev, num_tc)) {
2911 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
2912 num_tc);
2913 return -EINVAL;
2914 }
2915
2916 /* configure priority to traffic class mapping */
2917 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2918 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2919 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
2920 prio, bp->prio_to_cos[prio]);
2921 }
2922
2923
2924 /* Use this configuration to differentiate tc0 from other COSes
2925 This can be used for ETS or PFC, and saves the effort of setting
2926 up a multi-class queueing discipline or negotiating DCBX with a switch
2927 netdev_set_prio_tc_map(dev, 0, 0);
2928 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
2929 for (prio = 1; prio < 16; prio++) {
2930 netdev_set_prio_tc_map(dev, prio, 1);
2931 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
2932 } */
2933
2934 /* configure traffic class to transmission queue mapping */
2935 for (cos = 0; cos < bp->max_cos; cos++) {
2936 count = BNX2X_NUM_ETH_QUEUES(bp);
2937 offset = cos * MAX_TXQS_PER_COS;
2938 netdev_set_tc_queue(dev, cos, count, offset);
2939 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
2940 cos, offset, count);
2941 }
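    /* Illustrative example (values are assumptions, not a real
     * configuration): with bp->max_cos = 2 and 4 ETH queues, the loops
     * above map priorities to classes via bp->prio_to_cos[] and give
     * tc0 the txq range starting at offset 0 and tc1 the range starting
     * at MAX_TXQS_PER_COS, each with count 4, so the stack roughly picks
     * txq = offset(tc) + (hash % count) for each transmitted skb.
     */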
2942
2943 return 0;
2944}
2945
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002946/* called with rtnl_lock */
2947int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2948{
2949 struct sockaddr *addr = p;
2950 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002951 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002952
2953 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2954 return -EINVAL;
2955
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002956 if (netif_running(dev)) {
2957 rc = bnx2x_set_eth_mac(bp, false);
2958 if (rc)
2959 return rc;
2960 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002961
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002962 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2963
2964 if (netif_running(dev))
2965 rc = bnx2x_set_eth_mac(bp, true);
2966
2967 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002968}
2969
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002970static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2971{
2972 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2973 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002974 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002975
2976 /* Common */
2977#ifdef BCM_CNIC
2978 if (IS_FCOE_IDX(fp_index)) {
2979 memset(sb, 0, sizeof(union host_hc_status_block));
2980 fp->status_blk_mapping = 0;
2981
2982 } else {
2983#endif
2984 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002985 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002986 BNX2X_PCI_FREE(sb->e2_sb,
2987 bnx2x_fp(bp, fp_index,
2988 status_blk_mapping),
2989 sizeof(struct host_hc_status_block_e2));
2990 else
2991 BNX2X_PCI_FREE(sb->e1x_sb,
2992 bnx2x_fp(bp, fp_index,
2993 status_blk_mapping),
2994 sizeof(struct host_hc_status_block_e1x));
2995#ifdef BCM_CNIC
2996 }
2997#endif
2998 /* Rx */
2999 if (!skip_rx_queue(bp, fp_index)) {
3000 bnx2x_free_rx_bds(fp);
3001
3002 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3003 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3004 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3005 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3006 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3007
3008 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3009 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3010 sizeof(struct eth_fast_path_rx_cqe) *
3011 NUM_RCQ_BD);
3012
3013 /* SGE ring */
3014 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3015 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3016 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3017 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3018 }
3019
3020 /* Tx */
3021 if (!skip_tx_queue(bp, fp_index)) {
3022 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003023 for_each_cos_in_tx_queue(fp, cos) {
3024 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3025
3026 DP(BNX2X_MSG_SP,
3027 "freeing tx memory of fp %d cos %d cid %d",
3028 fp_index, cos, txdata->cid);
3029
3030 BNX2X_FREE(txdata->tx_buf_ring);
3031 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3032 txdata->tx_desc_mapping,
3033 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3034 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003035 }
3036 /* end of fastpath */
3037}
3038
3039void bnx2x_free_fp_mem(struct bnx2x *bp)
3040{
3041 int i;
3042 for_each_queue(bp, i)
3043 bnx2x_free_fp_mem_at(bp, i);
3044}
3045
3046static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3047{
3048 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003049 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003050 bnx2x_fp(bp, index, sb_index_values) =
3051 (__le16 *)status_blk.e2_sb->sb.index_values;
3052 bnx2x_fp(bp, index, sb_running_index) =
3053 (__le16 *)status_blk.e2_sb->sb.running_index;
3054 } else {
3055 bnx2x_fp(bp, index, sb_index_values) =
3056 (__le16 *)status_blk.e1x_sb->sb.index_values;
3057 bnx2x_fp(bp, index, sb_running_index) =
3058 (__le16 *)status_blk.e1x_sb->sb.running_index;
3059 }
3060}
3061
3062static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3063{
3064 union host_hc_status_block *sb;
3065 struct bnx2x_fastpath *fp = &bp->fp[index];
3066 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003067 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003068
3069 /* if rx_ring_size specified - use it */
3070 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
Ariel Elior6383c0b2011-07-14 08:31:57 +00003071 MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003072
3073 /* allocate at least number of buffers required by FW */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003074 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003075 MIN_RX_SIZE_TPA,
3076 rx_ring_size);
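    /* Illustrative example (constants are assumptions): if no explicit
     * ring size was requested (bp->rx_ring_size == 0) and the device has
     * four RX queues, the default is MAX_RX_AVAIL / 4, which is then
     * raised to at least MIN_RX_SIZE_TPA (or MIN_RX_SIZE_NONTPA when TPA
     * is disabled) so the FW always gets the minimum it requires.
     */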
3077
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003078 /* Common */
3079 sb = &bnx2x_fp(bp, index, status_blk);
3080#ifdef BCM_CNIC
3081 if (!IS_FCOE_IDX(index)) {
3082#endif
3083 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003084 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003085 BNX2X_PCI_ALLOC(sb->e2_sb,
3086 &bnx2x_fp(bp, index, status_blk_mapping),
3087 sizeof(struct host_hc_status_block_e2));
3088 else
3089 BNX2X_PCI_ALLOC(sb->e1x_sb,
3090 &bnx2x_fp(bp, index, status_blk_mapping),
3091 sizeof(struct host_hc_status_block_e1x));
3092#ifdef BCM_CNIC
3093 }
3094#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003095
3096 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3097 * set shortcuts for it.
3098 */
3099 if (!IS_FCOE_IDX(index))
3100 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003101
3102 /* Tx */
3103 if (!skip_tx_queue(bp, index)) {
3104 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003105 for_each_cos_in_tx_queue(fp, cos) {
3106 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3107
3108 DP(BNX2X_MSG_SP, "allocating tx memory of "
3109 "fp %d cos %d",
3110 index, cos);
3111
3112 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003113 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003114 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3115 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003116 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003117 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003118 }
3119
3120 /* Rx */
3121 if (!skip_rx_queue(bp, index)) {
3122 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3123 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3124 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3125 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3126 &bnx2x_fp(bp, index, rx_desc_mapping),
3127 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3128
3129 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3130 &bnx2x_fp(bp, index, rx_comp_mapping),
3131 sizeof(struct eth_fast_path_rx_cqe) *
3132 NUM_RCQ_BD);
3133
3134 /* SGE ring */
3135 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3136 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3137 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3138 &bnx2x_fp(bp, index, rx_sge_mapping),
3139 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3140 /* RX BD ring */
3141 bnx2x_set_next_page_rx_bd(fp);
3142
3143 /* CQ ring */
3144 bnx2x_set_next_page_rx_cq(fp);
3145
3146 /* BDs */
3147 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3148 if (ring_size < rx_ring_size)
3149 goto alloc_mem_err;
3150 }
3151
3152 return 0;
3153
3154/* handles low memory cases */
3155alloc_mem_err:
3156 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3157 index, ring_size);
3158 /* FW will drop all packets if the queue is not big enough;
3159 * in that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003160 * Min size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003161 */
3162 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003163 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003164 /* release memory allocated for this queue */
3165 bnx2x_free_fp_mem_at(bp, index);
3166 return -ENOMEM;
3167 }
3168 return 0;
3169}
3170
3171int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3172{
3173 int i;
3174
3175 /**
3176 * 1. Allocate FP for leading - fatal if error
3177 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003178 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3179 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003180 */
3181
3182 /* leading */
3183 if (bnx2x_alloc_fp_mem_at(bp, 0))
3184 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003185
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003186#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003187 if (!NO_FCOE(bp))
3188 /* FCoE */
3189 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3190 /* we will fail the load process instead of marking
3191 * NO_FCOE_FLAG
3192 */
3193 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003194#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003195
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003196 /* RSS */
3197 for_each_nondefault_eth_queue(bp, i)
3198 if (bnx2x_alloc_fp_mem_at(bp, i))
3199 break;
3200
3201 /* handle memory failures */
3202 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3203 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3204
3205 WARN_ON(delta < 0);
3206#ifdef BCM_CNIC
3207 /**
3208 * move non-eth FPs next to the last eth FP;
3209 * must be done in this order:
3210 * FCOE_IDX < FWD_IDX < OOO_IDX
3211 */
3212
Ariel Elior6383c0b2011-07-14 08:31:57 +00003213 /* move the FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003214 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3215#endif
3216 bp->num_queues -= delta;
3217 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3218 bp->num_queues + delta, bp->num_queues);
3219 }
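    /* Illustrative example (an assumed failure scenario): with 8 ETH
     * queues, an allocation failure at i = 5 gives delta = 3; the FCoE
     * fastpath is then moved from FCOE_IDX to FCOE_IDX - 3 so it still
     * follows the last usable ETH queue, and bp->num_queues shrinks by 3.
     */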
3220
3221 return 0;
3222}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003223
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003224void bnx2x_free_mem_bp(struct bnx2x *bp)
3225{
3226 kfree(bp->fp);
3227 kfree(bp->msix_table);
3228 kfree(bp->ilt);
3229}
3230
3231int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3232{
3233 struct bnx2x_fastpath *fp;
3234 struct msix_entry *tbl;
3235 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003236 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003237
Ariel Elior6383c0b2011-07-14 08:31:57 +00003238 /*
3239 * The biggest MSI-X table we might need is the maximum number of fast
3240 * path IGU SBs plus the default SB (for the PF).
3241 */
3242 msix_table_size = bp->igu_sb_cnt + 1;
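    /* Illustrative example (the SB count is an assumption): with
     * bp->igu_sb_cnt = 16 fastpath status blocks, the table below is
     * sized for 17 MSI-X entries - 16 fastpath SBs plus the PF's
     * default SB.
     */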
3243
3244 /* fp array: RSS plus CNIC related L2 queues */
3245 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3246 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003247 if (!fp)
3248 goto alloc_err;
3249 bp->fp = fp;
3250
3251 /* msix table */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003252 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003253 if (!tbl)
3254 goto alloc_err;
3255 bp->msix_table = tbl;
3256
3257 /* ilt */
3258 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3259 if (!ilt)
3260 goto alloc_err;
3261 bp->ilt = ilt;
3262
3263 return 0;
3264alloc_err:
3265 bnx2x_free_mem_bp(bp);
3266 return -ENOMEM;
3267
3268}
3269
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003270int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003271{
3272 struct bnx2x *bp = netdev_priv(dev);
3273
3274 if (unlikely(!netif_running(dev)))
3275 return 0;
3276
3277 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3278 return bnx2x_nic_load(bp, LOAD_NORMAL);
3279}
3280
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003281int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3282{
3283 u32 sel_phy_idx = 0;
3284 if (bp->link_params.num_phys <= 1)
3285 return INT_PHY;
3286
3287 if (bp->link_vars.link_up) {
3288 sel_phy_idx = EXT_PHY1;
3289 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3290 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3291 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3292 sel_phy_idx = EXT_PHY2;
3293 } else {
3294
3295 switch (bnx2x_phy_selection(&bp->link_params)) {
3296 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3297 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3298 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3299 sel_phy_idx = EXT_PHY1;
3300 break;
3301 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3302 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3303 sel_phy_idx = EXT_PHY2;
3304 break;
3305 }
3306 }
3307
3308 return sel_phy_idx;
3309
3310}
3311int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3312{
3313 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3314 /*
3315 * The selected active PHY index always reflects the post-swap ordering
3316 * (in case PHY swapping is enabled), so when swapping is enabled we
3317 * need to reverse the configuration index.
3318 */
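    /* Illustrative reading of the note above (assumed scenario): if
     * EXT_PHY1 is currently active but PORT_HW_CFG_PHY_SWAPPED_ENABLED
     * is set, its link configuration lives under the EXT_PHY2 slot, so
     * the index is flipped before LINK_CONFIG_IDX() is applied.
     */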
3319
3320 if (bp->link_params.multi_phy_config &
3321 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3322 if (sel_phy_idx == EXT_PHY1)
3323 sel_phy_idx = EXT_PHY2;
3324 else if (sel_phy_idx == EXT_PHY2)
3325 sel_phy_idx = EXT_PHY1;
3326 }
3327 return LINK_CONFIG_IDX(sel_phy_idx);
3328}
3329
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003330/* called with rtnl_lock */
3331int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3332{
3333 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003334
3335 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3336 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3337 return -EAGAIN;
3338 }
3339
3340 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3341 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3342 return -EINVAL;
3343
3344 /* This does not race with packet allocation
3345 * because the actual alloc size is
3346 * only updated as part of load
3347 */
3348 dev->mtu = new_mtu;
3349
Michał Mirosław66371c42011-04-12 09:38:23 +00003350 return bnx2x_reload_if_running(dev);
3351}
3352
3353u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3354{
3355 struct bnx2x *bp = netdev_priv(dev);
3356
3357 /* TPA requires Rx CSUM offloading */
3358 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3359 features &= ~NETIF_F_LRO;
3360
3361 return features;
3362}
3363
3364int bnx2x_set_features(struct net_device *dev, u32 features)
3365{
3366 struct bnx2x *bp = netdev_priv(dev);
3367 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003368 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003369
3370 if (features & NETIF_F_LRO)
3371 flags |= TPA_ENABLE_FLAG;
3372 else
3373 flags &= ~TPA_ENABLE_FLAG;
3374
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003375 if (features & NETIF_F_LOOPBACK) {
3376 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3377 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3378 bnx2x_reload = true;
3379 }
3380 } else {
3381 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3382 bp->link_params.loopback_mode = LOOPBACK_NONE;
3383 bnx2x_reload = true;
3384 }
3385 }
3386
Michał Mirosław66371c42011-04-12 09:38:23 +00003387 if (flags ^ bp->flags) {
3388 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003389 bnx2x_reload = true;
3390 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003391
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003392 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003393 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3394 return bnx2x_reload_if_running(dev);
3395 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003396 }
3397
Michał Mirosław66371c42011-04-12 09:38:23 +00003398 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003399}
3400
3401void bnx2x_tx_timeout(struct net_device *dev)
3402{
3403 struct bnx2x *bp = netdev_priv(dev);
3404
3405#ifdef BNX2X_STOP_ON_ERROR
3406 if (!bp->panic)
3407 bnx2x_panic();
3408#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003409
3410 smp_mb__before_clear_bit();
3411 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3412 smp_mb__after_clear_bit();
3413
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003414 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003415 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003416}
3417
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003418int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3419{
3420 struct net_device *dev = pci_get_drvdata(pdev);
3421 struct bnx2x *bp;
3422
3423 if (!dev) {
3424 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3425 return -ENODEV;
3426 }
3427 bp = netdev_priv(dev);
3428
3429 rtnl_lock();
3430
3431 pci_save_state(pdev);
3432
3433 if (!netif_running(dev)) {
3434 rtnl_unlock();
3435 return 0;
3436 }
3437
3438 netif_device_detach(dev);
3439
3440 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3441
3442 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3443
3444 rtnl_unlock();
3445
3446 return 0;
3447}
3448
3449int bnx2x_resume(struct pci_dev *pdev)
3450{
3451 struct net_device *dev = pci_get_drvdata(pdev);
3452 struct bnx2x *bp;
3453 int rc;
3454
3455 if (!dev) {
3456 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3457 return -ENODEV;
3458 }
3459 bp = netdev_priv(dev);
3460
3461 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3462 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3463 return -EAGAIN;
3464 }
3465
3466 rtnl_lock();
3467
3468 pci_restore_state(pdev);
3469
3470 if (!netif_running(dev)) {
3471 rtnl_unlock();
3472 return 0;
3473 }
3474
3475 bnx2x_set_power_state(bp, PCI_D0);
3476 netif_device_attach(dev);
3477
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003478 /* Since the chip was reset, clear the FW sequence number */
3479 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003480 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3481
3482 rtnl_unlock();
3483
3484 return rc;
3485}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003486
3487
3488void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3489 u32 cid)
3490{
3491 /* ustorm cxt validation */
3492 cxt->ustorm_ag_context.cdu_usage =
3493 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3494 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3495 /* xcontext validation */
3496 cxt->xstorm_ag_context.cdu_reserved =
3497 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3498 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3499}
3500
3501static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3502 u8 fw_sb_id, u8 sb_index,
3503 u8 ticks)
3504{
3505
3506 u32 addr = BAR_CSTRORM_INTMEM +
3507 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3508 REG_WR8(bp, addr, ticks);
3509 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3510 port, fw_sb_id, sb_index, ticks);
3511}
3512
3513static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3514 u16 fw_sb_id, u8 sb_index,
3515 u8 disable)
3516{
3517 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3518 u32 addr = BAR_CSTRORM_INTMEM +
3519 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3520 u16 flags = REG_RD16(bp, addr);
3521 /* clear and set */
3522 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3523 flags |= enable_flag;
3524 REG_WR16(bp, addr, flags);
3525 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3526 port, fw_sb_id, sb_index, disable);
3527}
3528
3529void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3530 u8 sb_index, u8 disable, u16 usec)
3531{
3532 int port = BP_PORT(bp);
3533 u8 ticks = usec / BNX2X_BTR;
3534
3535 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3536
3537 disable = disable ? 1 : (usec ? 0 : 1);
3538 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3539}