Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Dmitry Kravkov5de92402011-05-04 23:51:13 +00003 * Copyright (c) 2007-2011 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Joe Perchesf1deab52011-08-14 12:16:21 +000018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000021#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000022#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000023#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000024#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070025#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000026#include <linux/firmware.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000027#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000028#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000029#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000030#include "bnx2x_sp.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000031
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030032
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000033
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000034/**
35 * bnx2x_bz_fp - zero content of the fastpath structure.
36 *
37 * @bp: driver handle
38 * @index: fastpath index to be zeroed
39 *
40 * Makes sure the contents of bp->fp[index].napi are kept
41 * intact.
42 */
43static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
44{
45 struct bnx2x_fastpath *fp = &bp->fp[index];
46 struct napi_struct orig_napi = fp->napi;
47 /* bzero bnx2x_fastpath contents */
48 memset(fp, 0, sizeof(*fp));
49
50 /* Restore the NAPI object as it has been already initialized */
51 fp->napi = orig_napi;
Ariel Elior6383c0b2011-07-14 08:31:57 +000052
53 fp->bp = bp;
54 fp->index = index;
55 if (IS_ETH_FP(fp))
56 fp->max_cos = bp->max_cos;
57 else
58 /* Special queues support only one CoS */
59 fp->max_cos = 1;
60
61 /*
62 * Set the tpa flag for each queue. The tpa flag determines the minimal
63 * queue size, so it must be set prior to queue memory allocation
64 */
65 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
66
67#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -070068 /* We don't want TPA on an FCoE L2 ring */
69 if (IS_FCOE_FP(fp))
70 fp->disable_tpa = 1;
Ariel Elior6383c0b2011-07-14 08:31:57 +000071#endif
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000072}
73
74/**
75 * bnx2x_move_fp - move content of the fastpath structure.
76 *
77 * @bp: driver handle
78 * @from: source FP index
79 * @to: destination FP index
80 *
81 * Makes sure the contents of bp->fp[to].napi are kept
Ariel Elior72754082011-11-13 04:34:31 +000082 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then mem copying the entire
84 * source onto the target
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000085 */
86static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
87{
88 struct bnx2x_fastpath *from_fp = &bp->fp[from];
89 struct bnx2x_fastpath *to_fp = &bp->fp[to];
Ariel Elior72754082011-11-13 04:34:31 +000090
91 /* Copy the NAPI object as it has been already initialized */
92 from_fp->napi = to_fp->napi;
93
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000094 /* Move bnx2x_fastpath contents */
95 memcpy(to_fp, from_fp, sizeof(*to_fp));
96 to_fp->index = to;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000097}
98
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030099int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
100
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000101/* free skb in the packet ring at pos idx
102 * return idx of last bd freed
103 */
Ariel Elior6383c0b2011-07-14 08:31:57 +0000104static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000105 u16 idx)
106{
Ariel Elior6383c0b2011-07-14 08:31:57 +0000107 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000108 struct eth_tx_start_bd *tx_start_bd;
109 struct eth_tx_bd *tx_data_bd;
110 struct sk_buff *skb = tx_buf->skb;
111 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
112 int nbd;
113
114 /* prefetch skb end pointer to speedup dev_kfree_skb() */
115 prefetch(&skb->end);
116
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300117 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000118 txdata->txq_index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000119
120 /* unmap first bd */
121 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000122 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000123 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +0000124 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000125
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300126
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000127 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
128#ifdef BNX2X_STOP_ON_ERROR
129 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
130 BNX2X_ERR("BAD nbd!\n");
131 bnx2x_panic();
132 }
133#endif
134 new_cons = nbd + tx_buf->first_bd;
135
136 /* Get the next bd */
137 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
138
139 /* Skip a parse bd... */
140 --nbd;
141 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
142
143 /* ...and the TSO split header bd since they have no mapping */
144 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
145 --nbd;
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
147 }
148
149 /* now free frags */
150 while (nbd > 0) {
151
152 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000153 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000154 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
155 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
156 if (--nbd)
157 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
158 }
159
160 /* release skb */
161 WARN_ON(!skb);
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000162 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000163 tx_buf->first_bd = 0;
164 tx_buf->skb = NULL;
165
166 return new_cons;
167}
168
Ariel Elior6383c0b2011-07-14 08:31:57 +0000169int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000170{
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000171 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000172 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000173
174#ifdef BNX2X_STOP_ON_ERROR
175 if (unlikely(bp->panic))
176 return -1;
177#endif
178
Ariel Elior6383c0b2011-07-14 08:31:57 +0000179 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
180 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
181 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000182
183 while (sw_cons != hw_cons) {
184 u16 pkt_cons;
185
186 pkt_cons = TX_BD(sw_cons);
187
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000188 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
189 " pkt_cons %u\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +0000190 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000191
Ariel Elior6383c0b2011-07-14 08:31:57 +0000192 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000193 sw_cons++;
194 }
195
Ariel Elior6383c0b2011-07-14 08:31:57 +0000196 txdata->tx_pkt_cons = sw_cons;
197 txdata->tx_bd_cons = bd_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000198
199 /* Need to make the tx_bd_cons update visible to start_xmit()
200 * before checking for netif_tx_queue_stopped(). Without the
201 * memory barrier, there is a small possibility that
202 * start_xmit() will miss it and cause the queue to be stopped
203 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300204 * On the other hand we need an rmb() here to ensure the proper
205 * ordering of bit testing in the following
206 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000207 */
208 smp_mb();
209
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000210 if (unlikely(netif_tx_queue_stopped(txq))) {
211 /* Taking tx_lock() is needed to prevent re-enabling the queue
212 * while it's empty. This could have happened if rx_action() gets
213 * suspended in bnx2x_tx_int() after the condition before
214 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
215 *
216 * stops the queue->sees fresh tx_bd_cons->releases the queue->
217 * sends some packets consuming the whole queue again->
218 * stops the queue
219 */
220
221 __netif_tx_lock(txq, smp_processor_id());
222
223 if ((netif_tx_queue_stopped(txq)) &&
224 (bp->state == BNX2X_STATE_OPEN) &&
Ariel Elior6383c0b2011-07-14 08:31:57 +0000225 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000226 netif_tx_wake_queue(txq);
227
228 __netif_tx_unlock(txq);
229 }
230 return 0;
231}
232
233static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
234 u16 idx)
235{
236 u16 last_max = fp->last_max_sge;
237
238 if (SUB_S16(idx, last_max) > 0)
239 fp->last_max_sge = idx;
240}
241
242static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
243 struct eth_fast_path_rx_cqe *fp_cqe)
244{
245 struct bnx2x *bp = fp->bp;
246 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
247 le16_to_cpu(fp_cqe->len_on_bd)) >>
248 SGE_PAGE_SHIFT;
249 u16 last_max, last_elem, first_elem;
250 u16 delta = 0;
251 u16 i;
252
253 if (!sge_len)
254 return;
255
256 /* First mark all used pages */
257 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300258 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000259 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000260
261 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000262 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000263
264 /* Here we assume that the last SGE index is the biggest */
265 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000266 bnx2x_update_last_max_sge(fp,
267 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000268
269 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300270 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
271 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000272
273 /* If ring is not full */
274 if (last_elem + 1 != first_elem)
275 last_elem++;
276
277 /* Now update the prod */
278 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
279 if (likely(fp->sge_mask[i]))
280 break;
281
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300282 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
283 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000284 }
285
286 if (delta > 0) {
287 fp->rx_sge_prod += delta;
288 /* clear page-end entries */
289 bnx2x_clear_sge_mask_next_elems(fp);
290 }
291
292 DP(NETIF_MSG_RX_STATUS,
293 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
294 fp->last_max_sge, fp->rx_sge_prod);
295}
296
297static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300298 struct sk_buff *skb, u16 cons, u16 prod,
299 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000300{
301 struct bnx2x *bp = fp->bp;
302 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
303 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
304 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
305 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300306 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
307 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000308
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300309 /* print error if current state != stop */
310 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000311 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
312
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300313 /* Try to map an empty skb from the aggregation info */
314 mapping = dma_map_single(&bp->pdev->dev,
315 first_buf->skb->data,
316 fp->rx_buf_size, DMA_FROM_DEVICE);
317 /*
318 * ...if it fails - move the skb from the consumer to the producer
319 * and set the current aggregation state as ERROR to drop it
320 * when TPA_STOP arrives.
321 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000322
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300323 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
324 /* Move the BD from the consumer to the producer */
325 bnx2x_reuse_rx_skb(fp, cons, prod);
326 tpa_info->tpa_state = BNX2X_TPA_ERROR;
327 return;
328 }
329
330 /* move empty skb from pool to prod */
331 prod_rx_buf->skb = first_buf->skb;
332 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000333 /* point prod_bd to new skb */
334 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
335 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
336
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300337 /* move partial skb from cons to pool (don't unmap yet) */
338 *first_buf = *cons_rx_buf;
339
340 /* mark bin state as START */
341 tpa_info->parsing_flags =
342 le16_to_cpu(cqe->pars_flags.flags);
343 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
344 tpa_info->tpa_state = BNX2X_TPA_START;
345 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
346 tpa_info->placement_offset = cqe->placement_offset;
347
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000348#ifdef BNX2X_STOP_ON_ERROR
349 fp->tpa_queue_used |= (1 << queue);
350#ifdef _ASM_GENERIC_INT_L64_H
351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
352#else
353 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
354#endif
355 fp->tpa_queue_used);
356#endif
357}
358
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000359/* Timestamp option length allowed for TPA aggregation:
360 *
361 * nop nop kind length echo val
362 */
363#define TPA_TSTAMP_OPT_LEN 12
364/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000365 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000366 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000367 * @bp: driver handle
368 * @parsing_flags: parsing flags from the START CQE
369 * @len_on_bd: total length of the first packet for the
370 * aggregation.
371 *
372 * Approximate value of the MSS for this aggregation calculated using
373 * the first packet of it.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000374 */
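/*
 * Worked example of the calculation below: for an IPv4 aggregation that
 * carries the TCP timestamp option, hdrs_len = ETH_HLEN (14) +
 * sizeof(struct tcphdr) (20) + sizeof(struct iphdr) (20) +
 * TPA_TSTAMP_OPT_LEN (12) = 66, so the returned MSS is len_on_bd - 66.
 */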
375static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
376 u16 len_on_bd)
377{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300378 /*
379 * TPA aggregation won't have IP options, TCP options other
380 * than timestamp, or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000381 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300382 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
383
384 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
385 PRS_FLAG_OVERETH_IPV6)
386 hdrs_len += sizeof(struct ipv6hdr);
387 else /* IPv4 */
388 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000389
390
391 /* Check if there was a TCP timestamp; if there is, it will
392 * always be 12 bytes long: nop nop kind length echo val.
393 *
394 * Otherwise FW would close the aggregation.
395 */
396 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
397 hdrs_len += TPA_TSTAMP_OPT_LEN;
398
399 return len_on_bd - hdrs_len;
400}
401
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000402static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300403 u16 queue, struct sk_buff *skb,
404 struct eth_end_agg_rx_cqe *cqe,
405 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000406{
407 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000408 u32 i, frag_len, frag_size, pages;
409 int err;
410 int j;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300411 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
412 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000413
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300414 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000415 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
416
417 /* This is needed in order to enable forwarding support */
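	/* An aggregated skb needs a valid gso_size so the stack can
	 * re-segment it, e.g. when the frame is forwarded rather than
	 * delivered locally. */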
418 if (frag_size)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300419 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
420 tpa_info->parsing_flags, len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000421
422#ifdef BNX2X_STOP_ON_ERROR
423 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
424 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
425 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300426 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000427 bnx2x_panic();
428 return -EINVAL;
429 }
430#endif
431
432 /* Run through the SGL and compose the fragmented skb */
433 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300434 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000435
436 /* FW gives the indices of the SGE as if the ring is an array
437 (meaning that "next" element will consume 2 indices) */
438 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
439 rx_pg = &fp->rx_page_ring[sge_idx];
440 old_rx_pg = *rx_pg;
441
442 /* If we fail to allocate a substitute page, we simply stop
443 where we are and drop the whole packet */
444 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
445 if (unlikely(err)) {
446 fp->eth_q_stats.rx_skb_alloc_failed++;
447 return err;
448 }
449
450 /* Unmap the page as we are going to pass it to the stack */
451 dma_unmap_page(&bp->pdev->dev,
452 dma_unmap_addr(&old_rx_pg, mapping),
453 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
454
455 /* Add one frag and update the appropriate fields in the skb */
456 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
457
458 skb->data_len += frag_len;
Eric Dumazete1ac50f2011-10-19 23:00:23 +0000459 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000460 skb->len += frag_len;
461
462 frag_size -= frag_len;
463 }
464
465 return 0;
466}
467
468static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300469 u16 queue, struct eth_end_agg_rx_cqe *cqe,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000470 u16 cqe_idx)
471{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300472 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
473 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
474 u8 pad = tpa_info->placement_offset;
475 u16 len = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000476 struct sk_buff *skb = rx_buf->skb;
477 /* alloc new skb */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300478 struct sk_buff *new_skb;
479 u8 old_tpa_state = tpa_info->tpa_state;
480
481 tpa_info->tpa_state = BNX2X_TPA_STOP;
482
483 /* If there was an error during the handling of the TPA_START -
484 * drop this aggregation.
485 */
486 if (old_tpa_state == BNX2X_TPA_ERROR)
487 goto drop;
488
489 /* Try to allocate the new skb */
490 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000491
492 /* Unmap skb in the pool anyway, as we are going to change
493 pool entry status to BNX2X_TPA_STOP even if new skb allocation
494 fails. */
495 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800496 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000497
498 if (likely(new_skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000499 prefetch(skb);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000500 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000501
502#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800503 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000504 BNX2X_ERR("skb_put is about to fail... "
505 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800506 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000507 bnx2x_panic();
508 return;
509 }
510#endif
511
512 skb_reserve(skb, pad);
513 skb_put(skb, len);
514
515 skb->protocol = eth_type_trans(skb, bp->dev);
516 skb->ip_summed = CHECKSUM_UNNECESSARY;
517
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300518 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
519 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
520 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
Hao Zheng9bcc0892010-10-20 13:56:11 +0000521 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000522 } else {
523 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
524 " - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000525 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000526 }
527
528
529 /* put new skb in bin */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300530 rx_buf->skb = new_skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000531
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300532 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000533 }
534
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300535drop:
536 /* drop the packet and keep the buffer in the bin */
537 DP(NETIF_MSG_RX_STATUS,
538 "Failed to allocate or map a new skb - dropping packet!\n");
539 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000540}
541
542/* Set Toeplitz hash value in the skb using the value from the
543 * CQE (calculated by HW).
544 */
545static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
546 struct sk_buff *skb)
547{
548 /* Set Toeplitz hash from CQE */
549 if ((bp->dev->features & NETIF_F_RXHASH) &&
550 (cqe->fast_path_cqe.status_flags &
551 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
552 skb->rxhash =
553 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
554}
555
556int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
557{
558 struct bnx2x *bp = fp->bp;
559 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
560 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
561 int rx_pkt = 0;
562
563#ifdef BNX2X_STOP_ON_ERROR
564 if (unlikely(bp->panic))
565 return 0;
566#endif
567
568 /* The CQ "next element" is the same size as a regular element,
569 that's why it's ok here */
570 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
571 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
572 hw_comp_cons++;
573
574 bd_cons = fp->rx_bd_cons;
575 bd_prod = fp->rx_bd_prod;
576 bd_prod_fw = bd_prod;
577 sw_comp_cons = fp->rx_comp_cons;
578 sw_comp_prod = fp->rx_comp_prod;
579
580 /* Memory barrier necessary as speculative reads of the rx
581 * buffer can be ahead of the index in the status block
582 */
583 rmb();
584
585 DP(NETIF_MSG_RX_STATUS,
586 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
587 fp->index, hw_comp_cons, sw_comp_cons);
588
589 while (sw_comp_cons != hw_comp_cons) {
590 struct sw_rx_bd *rx_buf = NULL;
591 struct sk_buff *skb;
592 union eth_rx_cqe *cqe;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300593 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000594 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300595 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000596 u16 len, pad;
597
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300598#ifdef BNX2X_STOP_ON_ERROR
599 if (unlikely(bp->panic))
600 return 0;
601#endif
602
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000603 comp_ring_cons = RCQ_BD(sw_comp_cons);
604 bd_prod = RX_BD(bd_prod);
605 bd_cons = RX_BD(bd_cons);
606
607 /* Prefetch the page containing the BD descriptor
608 at the producer's index. It will be needed when a new skb is
609 allocated */
610 prefetch((void *)(PAGE_ALIGN((unsigned long)
611 (&fp->rx_desc_ring[bd_prod])) -
612 PAGE_SIZE + 1));
613
614 cqe = &fp->rx_comp_ring[comp_ring_cons];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300615 cqe_fp = &cqe->fast_path_cqe;
616 cqe_fp_flags = cqe_fp->type_error_flags;
617 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000618
619 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
620 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300621 cqe_fp_flags, cqe_fp->status_flags,
622 le32_to_cpu(cqe_fp->rss_hash_result),
623 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000624
625 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300626 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000627 bnx2x_sp_event(fp, cqe);
628 goto next_cqe;
629
630 /* this is an rx packet */
631 } else {
632 rx_buf = &fp->rx_buf_ring[bd_cons];
633 skb = rx_buf->skb;
634 prefetch(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000635
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300636 if (!CQE_TYPE_FAST(cqe_fp_type)) {
637#ifdef BNX2X_STOP_ON_ERROR
638 /* sanity check */
639 if (fp->disable_tpa &&
640 (CQE_TYPE_START(cqe_fp_type) ||
641 CQE_TYPE_STOP(cqe_fp_type)))
642 BNX2X_ERR("START/STOP packet while "
643 "disable_tpa type %x\n",
644 CQE_TYPE(cqe_fp_type));
645#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000646
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300647 if (CQE_TYPE_START(cqe_fp_type)) {
648 u16 queue = cqe_fp->queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000649 DP(NETIF_MSG_RX_STATUS,
650 "calling tpa_start on queue %d\n",
651 queue);
652
653 bnx2x_tpa_start(fp, queue, skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300654 bd_cons, bd_prod,
655 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000656
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300657 /* Set Toeplitz hash for LRO skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000658 bnx2x_set_skb_rxhash(bp, cqe, skb);
659
660 goto next_rx;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300661
662 } else {
663 u16 queue =
664 cqe->end_agg_cqe.queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000665 DP(NETIF_MSG_RX_STATUS,
666 "calling tpa_stop on queue %d\n",
667 queue);
668
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300669 bnx2x_tpa_stop(bp, fp, queue,
670 &cqe->end_agg_cqe,
671 comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000672#ifdef BNX2X_STOP_ON_ERROR
673 if (bp->panic)
674 return 0;
675#endif
676
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300677 bnx2x_update_sge_prod(fp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000678 goto next_cqe;
679 }
680 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300681 /* non TPA */
682 len = le16_to_cpu(cqe_fp->pkt_len);
683 pad = cqe_fp->placement_offset;
Vladislav Zolotarov9924caf2011-07-19 01:37:42 +0000684 dma_sync_single_for_cpu(&bp->pdev->dev,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000685 dma_unmap_addr(rx_buf, mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300686 pad + RX_COPY_THRESH,
687 DMA_FROM_DEVICE);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000688 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000689
690 /* is this an error packet? */
691 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
692 DP(NETIF_MSG_RX_ERR,
693 "ERROR flags %x rx packet %u\n",
694 cqe_fp_flags, sw_comp_cons);
695 fp->eth_q_stats.rx_err_discard_pkt++;
696 goto reuse_rx;
697 }
698
699 /* Since we don't have a jumbo ring
700 * copy small packets if the MTU > 1500
701 */
702 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
703 (len <= RX_COPY_THRESH)) {
704 struct sk_buff *new_skb;
705
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300706 new_skb = netdev_alloc_skb(bp->dev, len + pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000707 if (new_skb == NULL) {
708 DP(NETIF_MSG_RX_ERR,
709 "ERROR packet dropped "
710 "because of alloc failure\n");
711 fp->eth_q_stats.rx_skb_alloc_failed++;
712 goto reuse_rx;
713 }
714
715 /* aligned copy */
716 skb_copy_from_linear_data_offset(skb, pad,
717 new_skb->data + pad, len);
718 skb_reserve(new_skb, pad);
719 skb_put(new_skb, len);
720
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000721 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000722
723 skb = new_skb;
724
725 } else
726 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
727 dma_unmap_single(&bp->pdev->dev,
728 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800729 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000730 DMA_FROM_DEVICE);
731 skb_reserve(skb, pad);
732 skb_put(skb, len);
733
734 } else {
735 DP(NETIF_MSG_RX_ERR,
736 "ERROR packet dropped because "
737 "of alloc failure\n");
738 fp->eth_q_stats.rx_skb_alloc_failed++;
739reuse_rx:
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000740 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000741 goto next_rx;
742 }
743
744 skb->protocol = eth_type_trans(skb, bp->dev);
745
746 /* Set Toeplitz hash for a non-LRO skb */
747 bnx2x_set_skb_rxhash(bp, cqe, skb);
748
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700749 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000750
Michał Mirosław66371c42011-04-12 09:38:23 +0000751 if (bp->dev->features & NETIF_F_RXCSUM) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300752
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000753 if (likely(BNX2X_RX_CSUM_OK(cqe)))
754 skb->ip_summed = CHECKSUM_UNNECESSARY;
755 else
756 fp->eth_q_stats.hw_csum_err++;
757 }
758 }
759
Dmitry Kravkovf233caf2011-11-13 04:34:22 +0000760 skb_record_rx_queue(skb, fp->rx_queue);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000761
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300762 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
763 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000764 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300765 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000766 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000767
768
769next_rx:
770 rx_buf->skb = NULL;
771
772 bd_cons = NEXT_RX_IDX(bd_cons);
773 bd_prod = NEXT_RX_IDX(bd_prod);
774 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
775 rx_pkt++;
776next_cqe:
777 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
778 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
779
780 if (rx_pkt == budget)
781 break;
782 } /* while */
783
784 fp->rx_bd_cons = bd_cons;
785 fp->rx_bd_prod = bd_prod_fw;
786 fp->rx_comp_cons = sw_comp_cons;
787 fp->rx_comp_prod = sw_comp_prod;
788
789 /* Update producers */
790 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
791 fp->rx_sge_prod);
792
793 fp->rx_pkt += rx_pkt;
794 fp->rx_calls++;
795
796 return rx_pkt;
797}
798
799static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
800{
801 struct bnx2x_fastpath *fp = fp_cookie;
802 struct bnx2x *bp = fp->bp;
Ariel Elior6383c0b2011-07-14 08:31:57 +0000803 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000804
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000805 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
806 "[fp %d fw_sd %d igusb %d]\n",
807 fp->index, fp->fw_sb_id, fp->igu_sb_id);
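	/* Ack the status block with interrupts left disabled; they are
	 * re-enabled later from the NAPI poll path once the work is done. */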
808 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000809
810#ifdef BNX2X_STOP_ON_ERROR
811 if (unlikely(bp->panic))
812 return IRQ_HANDLED;
813#endif
814
815 /* Handle Rx and Tx according to MSI-X vector */
816 prefetch(fp->rx_cons_sb);
Ariel Elior6383c0b2011-07-14 08:31:57 +0000817
818 for_each_cos_in_tx_queue(fp, cos)
819 prefetch(fp->txdata[cos].tx_cons_sb);
820
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000821 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000822 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
823
824 return IRQ_HANDLED;
825}
826
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000827/* HW Lock for shared dual port PHYs */
828void bnx2x_acquire_phy_lock(struct bnx2x *bp)
829{
830 mutex_lock(&bp->port.phy_mutex);
831
832 if (bp->port.need_hw_lock)
833 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
834}
835
836void bnx2x_release_phy_lock(struct bnx2x *bp)
837{
838 if (bp->port.need_hw_lock)
839 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
840
841 mutex_unlock(&bp->port.phy_mutex);
842}
843
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800844/* calculates MF speed according to current linespeed and MF configuration */
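/* Illustrative numbers for the logic below: in SI mode a maxCfg of 50 caps
 * a 10000 Mbps link at 5000 Mbps; in SD mode the cap is the absolute rate
 * maxCfg * 100 Mbps.
 */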
845u16 bnx2x_get_mf_speed(struct bnx2x *bp)
846{
847 u16 line_speed = bp->link_vars.line_speed;
848 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000849 u16 maxCfg = bnx2x_extract_max_cfg(bp,
850 bp->mf_config[BP_VN(bp)]);
851
852 /* Calculate the current MAX line speed limit for the MF
853 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800854 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000855 if (IS_MF_SI(bp))
856 line_speed = (line_speed * maxCfg) / 100;
857 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800858 u16 vn_max_rate = maxCfg * 100;
859
860 if (vn_max_rate < line_speed)
861 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000862 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800863 }
864
865 return line_speed;
866}
867
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000868/**
869 * bnx2x_fill_report_data - fill link report data to report
870 *
871 * @bp: driver handle
872 * @data: link state to update
873 *
874 * It uses non-atomic bit operations because it is called under the mutex.
875 */
876static inline void bnx2x_fill_report_data(struct bnx2x *bp,
877 struct bnx2x_link_report_data *data)
878{
879 u16 line_speed = bnx2x_get_mf_speed(bp);
880
881 memset(data, 0, sizeof(*data));
882
883 /* Fill the report data: effective line speed */
884 data->line_speed = line_speed;
885
886 /* Link is down */
887 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
888 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
889 &data->link_report_flags);
890
891 /* Full DUPLEX */
892 if (bp->link_vars.duplex == DUPLEX_FULL)
893 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
894
895 /* Rx Flow Control is ON */
896 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
897 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
898
899 /* Tx Flow Control is ON */
900 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
901 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
902}
903
904/**
905 * bnx2x_link_report - report link status to OS.
906 *
907 * @bp: driver handle
908 *
909 * Calls the __bnx2x_link_report() under the same locking scheme
910 * as the link/PHY state managing code to ensure consistent link
911 * reporting.
912 */
913
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000914void bnx2x_link_report(struct bnx2x *bp)
915{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000916 bnx2x_acquire_phy_lock(bp);
917 __bnx2x_link_report(bp);
918 bnx2x_release_phy_lock(bp);
919}
920
921/**
922 * __bnx2x_link_report - report link status to OS.
923 *
924 * @bp: driver handle
925 *
926 * Non-atomic implementation.
927 * Should be called under the phy_lock.
928 */
929void __bnx2x_link_report(struct bnx2x *bp)
930{
931 struct bnx2x_link_report_data cur_data;
932
933 /* reread mf_cfg */
934 if (!CHIP_IS_E1(bp))
935 bnx2x_read_mf_cfg(bp);
936
937 /* Read the current link report info */
938 bnx2x_fill_report_data(bp, &cur_data);
939
940 /* Don't report link down or exactly the same link status twice */
941 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
942 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
943 &bp->last_reported_link.link_report_flags) &&
944 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
945 &cur_data.link_report_flags)))
946 return;
947
948 bp->link_cnt++;
949
950 /* We are going to report new link parameters now -
951 * remember the current data for the next time.
952 */
953 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
954
955 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
956 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000957 netif_carrier_off(bp->dev);
958 netdev_err(bp->dev, "NIC Link is Down\n");
959 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000960 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000961 const char *duplex;
962 const char *flow;
963
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000964 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000965
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000966 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
967 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000968 duplex = "full";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000969 else
Joe Perches94f05b02011-08-14 12:16:20 +0000970 duplex = "half";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000971
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000972 /* Handle the FC at the end so that only these flags could possibly
973 * be set. This way we can easily check whether any FC is
974 * enabled.
975 */
976 if (cur_data.link_report_flags) {
977 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
978 &cur_data.link_report_flags)) {
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000979 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
980 &cur_data.link_report_flags))
Joe Perches94f05b02011-08-14 12:16:20 +0000981 flow = "ON - receive & transmit";
982 else
983 flow = "ON - receive";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000984 } else {
Joe Perches94f05b02011-08-14 12:16:20 +0000985 flow = "ON - transmit";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000986 }
Joe Perches94f05b02011-08-14 12:16:20 +0000987 } else {
988 flow = "none";
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000989 }
Joe Perches94f05b02011-08-14 12:16:20 +0000990 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
991 cur_data.line_speed, duplex, flow);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000992 }
993}
994
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000995void bnx2x_init_rx_rings(struct bnx2x *bp)
996{
997 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000998 u16 ring_prod;
999 int i, j;
1000
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001001 /* Allocate TPA resources */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001002 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001003 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001004
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001005 DP(NETIF_MSG_IFUP,
1006 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1007
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001008 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001009 /* Fill the per-aggregation pool */
David S. Miller8decf862011-09-22 03:23:13 -04001010 for (i = 0; i < MAX_AGG_QS(bp); i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001011 struct bnx2x_agg_info *tpa_info =
1012 &fp->tpa_info[i];
1013 struct sw_rx_bd *first_buf =
1014 &tpa_info->first_buf;
1015
1016 first_buf->skb = netdev_alloc_skb(bp->dev,
1017 fp->rx_buf_size);
1018 if (!first_buf->skb) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001019 BNX2X_ERR("Failed to allocate TPA "
1020 "skb pool for queue[%d] - "
1021 "disabling TPA on this "
1022 "queue!\n", j);
1023 bnx2x_free_tpa_pool(bp, fp, i);
1024 fp->disable_tpa = 1;
1025 break;
1026 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001027 dma_unmap_addr_set(first_buf, mapping, 0);
1028 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001029 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001030
1031 /* "next page" elements initialization */
1032 bnx2x_set_next_page_sgl(fp);
1033
1034 /* set SGEs bit mask */
1035 bnx2x_init_sge_ring_bit_mask(fp);
1036
1037 /* Allocate SGEs and initialize the ring elements */
1038 for (i = 0, ring_prod = 0;
1039 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1040
1041 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1042 BNX2X_ERR("was only able to allocate "
1043 "%d rx sges\n", i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001044 BNX2X_ERR("disabling TPA for "
1045 "queue[%d]\n", j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001046 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001047 bnx2x_free_rx_sge_range(bp, fp,
1048 ring_prod);
1049 bnx2x_free_tpa_pool(bp, fp,
David S. Miller8decf862011-09-22 03:23:13 -04001050 MAX_AGG_QS(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001051 fp->disable_tpa = 1;
1052 ring_prod = 0;
1053 break;
1054 }
1055 ring_prod = NEXT_SGE_IDX(ring_prod);
1056 }
1057
1058 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001059 }
1060 }
1061
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001062 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001063 struct bnx2x_fastpath *fp = &bp->fp[j];
1064
1065 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001066
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001067 /* Activate BD ring */
1068 /* Warning!
1069 * this will generate an interrupt (to the TSTORM)
1070 * must only be done after chip is initialized
1071 */
1072 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1073 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001074
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001075 if (j != 0)
1076 continue;
1077
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001078 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001079 REG_WR(bp, BAR_USTRORM_INTMEM +
1080 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1081 U64_LO(fp->rx_comp_mapping));
1082 REG_WR(bp, BAR_USTRORM_INTMEM +
1083 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1084 U64_HI(fp->rx_comp_mapping));
1085 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001086 }
1087}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001088
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001089static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1090{
1091 int i;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001092 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001093
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001094 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001095 struct bnx2x_fastpath *fp = &bp->fp[i];
Ariel Elior6383c0b2011-07-14 08:31:57 +00001096 for_each_cos_in_tx_queue(fp, cos) {
1097 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001098
Ariel Elior6383c0b2011-07-14 08:31:57 +00001099 u16 sw_prod = txdata->tx_pkt_prod;
1100 u16 sw_cons = txdata->tx_pkt_cons;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001101
Ariel Elior6383c0b2011-07-14 08:31:57 +00001102 while (sw_cons != sw_prod) {
Dmitry Kravkovad756592011-11-13 04:34:23 +00001103 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons));
Ariel Elior6383c0b2011-07-14 08:31:57 +00001104 sw_cons++;
1105 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001106 }
1107 }
1108}
1109
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001110static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1111{
1112 struct bnx2x *bp = fp->bp;
1113 int i;
1114
1115 /* ring wasn't allocated */
1116 if (fp->rx_buf_ring == NULL)
1117 return;
1118
1119 for (i = 0; i < NUM_RX_BD; i++) {
1120 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1121 struct sk_buff *skb = rx_buf->skb;
1122
1123 if (skb == NULL)
1124 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001125 dma_unmap_single(&bp->pdev->dev,
1126 dma_unmap_addr(rx_buf, mapping),
1127 fp->rx_buf_size, DMA_FROM_DEVICE);
1128
1129 rx_buf->skb = NULL;
1130 dev_kfree_skb(skb);
1131 }
1132}
1133
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001134static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1135{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001136 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001137
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001138 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001139 struct bnx2x_fastpath *fp = &bp->fp[j];
1140
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001141 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001142
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001143 if (!fp->disable_tpa)
David S. Miller8decf862011-09-22 03:23:13 -04001144 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001145 }
1146}
1147
1148void bnx2x_free_skbs(struct bnx2x *bp)
1149{
1150 bnx2x_free_tx_skbs(bp);
1151 bnx2x_free_rx_skbs(bp);
1152}
1153
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001154void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1155{
1156 /* load old values */
1157 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1158
1159 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1160 /* leave all but MAX value */
1161 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1162
1163 /* set new MAX value */
1164 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1165 & FUNC_MF_CFG_MAX_BW_MASK;
1166
1167 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1168 }
1169}
1170
Dmitry Kravkovca924292011-06-14 01:33:08 +00001171/**
1172 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1173 *
1174 * @bp: driver handle
1175 * @nvecs: number of vectors to be released
1176 */
1177static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001178{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001179 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001180
Dmitry Kravkovca924292011-06-14 01:33:08 +00001181 if (nvecs == offset)
1182 return;
1183 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001184 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001185 bp->msix_table[offset].vector);
1186 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001187#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001188 if (nvecs == offset)
1189 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001190 offset++;
1191#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001192
Dmitry Kravkovca924292011-06-14 01:33:08 +00001193 for_each_eth_queue(bp, i) {
1194 if (nvecs == offset)
1195 return;
1196 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1197 "irq\n", i, bp->msix_table[offset].vector);
1198
1199 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001200 }
1201}
1202
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001203void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001204{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001205 if (bp->flags & USING_MSIX_FLAG)
Dmitry Kravkovca924292011-06-14 01:33:08 +00001206 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
Ariel Elior6383c0b2011-07-14 08:31:57 +00001207 CNIC_PRESENT + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001208 else if (bp->flags & USING_MSI_FLAG)
1209 free_irq(bp->pdev->irq, bp->dev);
1210 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001211 free_irq(bp->pdev->irq, bp->dev);
1212}
1213
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001214int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001215{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001216 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001217
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001218 bp->msix_table[msix_vec].entry = msix_vec;
1219 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1220 bp->msix_table[0].entry);
1221 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001222
1223#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001224 bp->msix_table[msix_vec].entry = msix_vec;
1225 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1226 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1227 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001228#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001229 /* We need separate vectors for ETH queues only (not FCoE) */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001230 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001231 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001232 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001233 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1234 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001235 }
1236
Ariel Elior6383c0b2011-07-14 08:31:57 +00001237 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001238
1239 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001240
1241 /*
1242 * reconfigure number of tx/rx queues according to available
1243 * MSI-X vectors
1244 */
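	/* Illustrative example: if 18 vectors were requested (16 ETH queues +
	 * CNIC + slowpath) but only 12 are available, diff is 6; the driver
	 * retries with 12 vectors and then trims bp->num_queues by 6. */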
1245 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001246 /* how many fewer vectors will we have? */
1247 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001248
1249 DP(NETIF_MSG_IFUP,
1250 "Trying to use less MSI-X vectors: %d\n", rc);
1251
1252 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1253
1254 if (rc) {
1255 DP(NETIF_MSG_IFUP,
1256 "MSI-X is not attainable rc %d\n", rc);
1257 return rc;
1258 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001259 /*
1260 * decrease number of queues by number of unallocated entries
1261 */
1262 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001263
1264 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1265 bp->num_queues);
1266 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001267 /* fall back to INTx if not enough memory */
1268 if (rc == -ENOMEM)
1269 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001270 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1271 return rc;
1272 }
1273
1274 bp->flags |= USING_MSIX_FLAG;
1275
1276 return 0;
1277}
1278
1279static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1280{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001281 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001282
Dmitry Kravkovca924292011-06-14 01:33:08 +00001283 rc = request_irq(bp->msix_table[offset++].vector,
1284 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001285 bp->dev->name, bp->dev);
1286 if (rc) {
1287 BNX2X_ERR("request sp irq failed\n");
1288 return -EBUSY;
1289 }
1290
1291#ifdef BCM_CNIC
1292 offset++;
1293#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001294 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001295 struct bnx2x_fastpath *fp = &bp->fp[i];
1296 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1297 bp->dev->name, i);
1298
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001299 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001300 bnx2x_msix_fp_int, 0, fp->name, fp);
1301 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001302 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1303 bp->msix_table[offset].vector, rc);
1304 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001305 return -EBUSY;
1306 }
1307
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001308 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001309 }
1310
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001311 i = BNX2X_NUM_ETH_QUEUES(bp);
Ariel Elior6383c0b2011-07-14 08:31:57 +00001312 offset = 1 + CNIC_PRESENT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001313 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1314 " ... fp[%d] %d\n",
1315 bp->msix_table[0].vector,
1316 0, bp->msix_table[offset].vector,
1317 i - 1, bp->msix_table[offset + i - 1].vector);
1318
1319 return 0;
1320}
1321
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001322int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001323{
1324 int rc;
1325
1326 rc = pci_enable_msi(bp->pdev);
1327 if (rc) {
1328 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1329 return -1;
1330 }
1331 bp->flags |= USING_MSI_FLAG;
1332
1333 return 0;
1334}
1335
1336static int bnx2x_req_irq(struct bnx2x *bp)
1337{
1338 unsigned long flags;
1339 int rc;
1340
1341 if (bp->flags & USING_MSI_FLAG)
1342 flags = 0;
1343 else
1344 flags = IRQF_SHARED;
1345
1346 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1347 bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001348 return rc;
1349}
1350
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001351static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1352{
1353 int rc = 0;
1354 if (bp->flags & USING_MSIX_FLAG) {
1355 rc = bnx2x_req_msix_irqs(bp);
1356 if (rc)
1357 return rc;
1358 } else {
1359 bnx2x_ack_int(bp);
1360 rc = bnx2x_req_irq(bp);
1361 if (rc) {
1362 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1363 return rc;
1364 }
1365 if (bp->flags & USING_MSI_FLAG) {
1366 bp->dev->irq = bp->pdev->irq;
1367 netdev_info(bp->dev, "using MSI IRQ %d\n",
1368 bp->pdev->irq);
1369 }
1370 }
1371
1372 return 0;
1373}
1374
1375static inline void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001376{
1377 int i;
1378
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001379 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001380 napi_enable(&bnx2x_fp(bp, i, napi));
1381}
1382
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001383static inline void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001384{
1385 int i;
1386
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001387 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001388 napi_disable(&bnx2x_fp(bp, i, napi));
1389}
1390
1391void bnx2x_netif_start(struct bnx2x *bp)
1392{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001393 if (netif_running(bp->dev)) {
1394 bnx2x_napi_enable(bp);
1395 bnx2x_int_enable(bp);
1396 if (bp->state == BNX2X_STATE_OPEN)
1397 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001398 }
1399}
1400
1401void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1402{
1403 bnx2x_int_disable_sync(bp, disable_hw);
1404 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001405}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001406
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001407u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1408{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001409 struct bnx2x *bp = netdev_priv(dev);
David S. Miller823dcd22011-08-20 10:39:12 -07001410
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001411#ifdef BCM_CNIC
David S. Miller823dcd22011-08-20 10:39:12 -07001412 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001413 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1414 u16 ether_type = ntohs(hdr->h_proto);
1415
1416 /* Skip VLAN tag if present */
1417 if (ether_type == ETH_P_8021Q) {
1418 struct vlan_ethhdr *vhdr =
1419 (struct vlan_ethhdr *)skb->data;
1420
1421 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1422 }
1423
1424 /* If ethertype is FCoE or FIP - use FCoE ring */
1425 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001426 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001427 }
1428#endif
David S. Miller823dcd22011-08-20 10:39:12 -07001429 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001430 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001431}
1432
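/**
 * bnx2x_set_num_queues - derive bp->num_queues from the RSS mode.
 *
 * @bp:		driver handle
 *
 * The special (non-ETH context) queues are added on top of the RSS queues.
 */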
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001433void bnx2x_set_num_queues(struct bnx2x *bp)
1434{
1435 switch (bp->multi_mode) {
1436 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001437 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001438 break;
1439 case ETH_RSS_MODE_REGULAR:
1440 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001441 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001442
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001443 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001444 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001445 break;
1446 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001447
1448 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001449 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001450}
1451
David S. Miller823dcd22011-08-20 10:39:12 -07001452/**
1453 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1454 *
1455 * @bp: Driver handle
1456 *
 1457 * We currently support at most 16 Tx queues for each CoS, thus we will
1458 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1459 * bp->max_cos.
1460 *
1461 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1462 * index after all ETH L2 indices.
1463 *
1464 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1465 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1466 * 16..31,...) with indices that are not coupled with any real Tx queue.
1467 *
1468 * The proper configuration of skb->queue_mapping is handled by
1469 * bnx2x_select_queue() and __skb_tx_hash().
1470 *
1471 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1472 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1473 */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001474static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1475{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001476 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001477
Ariel Elior6383c0b2011-07-14 08:31:57 +00001478 tx = MAX_TXQS_PER_COS * bp->max_cos;
1479 rx = BNX2X_NUM_ETH_QUEUES(bp);
1480
1481/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001482#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001483 if (!NO_FCOE(bp)) {
1484 rx += FCOE_PRESENT;
1485 tx += FCOE_PRESENT;
1486 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001487#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001488
1489 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1490 if (rc) {
1491 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1492 return rc;
1493 }
1494 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1495 if (rc) {
1496 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1497 return rc;
1498 }
1499
1500 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1501 tx, rx);
1502
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001503 return rc;
1504}
1505
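/* Set the per-queue Rx buffer size from the MTU (mini-jumbo for the FCoE ring) */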
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001506static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1507{
1508 int i;
1509
1510 for_each_queue(bp, i) {
1511 struct bnx2x_fastpath *fp = &bp->fp[i];
1512
1513 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1514 if (IS_FCOE_IDX(i))
1515 /*
 1516 * Although no IP frames are expected to arrive on
 1517 * this ring, we still want to add an
1518 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1519 * overrun attack.
1520 */
1521 fp->rx_buf_size =
1522 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001523 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001524 else
1525 fp->rx_buf_size =
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001526 bp->dev->mtu + ETH_OVREHEAD +
1527 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001528 }
1529}
1530
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001531static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1532{
1533 int i;
1534 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1535 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1536
1537 /*
 1538 * Prepare the initial contents of the indirection table if RSS is
1539 * enabled
1540 */
1541 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1542 for (i = 0; i < sizeof(ind_table); i++)
1543 ind_table[i] =
1544 bp->fp->cl_id + (i % num_eth_queues);
1545 }
1546
1547 /*
1548 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1549 * per-port, so if explicit configuration is needed, do it only
1550 * for a PMF.
1551 *
1552 * For 57712 and newer on the other hand it's a per-function
1553 * configuration.
1554 */
1555 return bnx2x_config_rss_pf(bp, ind_table,
1556 bp->port.pmf || !CHIP_IS_E1x(bp));
1557}
1558
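/**
 * bnx2x_config_rss_pf - send the RSS configuration ramrod.
 *
 * @bp:		driver handle
 * @ind_table:	indirection table to program
 * @config_hash:	also generate and program random RSS keys
 */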
1559int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1560{
1561 struct bnx2x_config_rss_params params = {0};
1562 int i;
1563
 1564 /* Although RSS is meaningless when there is a single HW queue, we
1565 * still need it enabled in order to have HW Rx hash generated.
1566 *
1567 * if (!is_eth_multi(bp))
1568 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1569 */
1570
1571 params.rss_obj = &bp->rss_conf_obj;
1572
1573 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1574
1575 /* RSS mode */
1576 switch (bp->multi_mode) {
1577 case ETH_RSS_MODE_DISABLED:
1578 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1579 break;
1580 case ETH_RSS_MODE_REGULAR:
1581 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1582 break;
1583 case ETH_RSS_MODE_VLAN_PRI:
1584 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1585 break;
1586 case ETH_RSS_MODE_E1HOV_PRI:
1587 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1588 break;
1589 case ETH_RSS_MODE_IP_DSCP:
1590 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1591 break;
1592 default:
1593 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1594 return -EINVAL;
1595 }
1596
1597 /* If RSS is enabled */
1598 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1599 /* RSS configuration */
1600 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1601 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1602 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1603 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1604
1605 /* Hash bits */
1606 params.rss_result_mask = MULTI_MASK;
1607
1608 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1609
1610 if (config_hash) {
1611 /* RSS keys */
1612 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1613 params.rss_key[i] = random32();
1614
1615 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1616 }
1617 }
1618
1619 return bnx2x_config_rss(bp, &params);
1620}
1621
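/* Run the HW_INIT phase given by load_code through the function state machine */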
1622static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1623{
1624 struct bnx2x_func_state_params func_params = {0};
1625
1626 /* Prepare parameters for function state transitions */
1627 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1628
1629 func_params.f_obj = &bp->func_obj;
1630 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1631
1632 func_params.params.hw_init.load_phase = load_code;
1633
1634 return bnx2x_func_state_change(bp, &func_params);
1635}
1636
1637/*
 1638 * Cleans the objects that have internal lists without sending
 1639 * ramrods. Should be run when interrupts are disabled.
1640 */
1641static void bnx2x_squeeze_objects(struct bnx2x *bp)
1642{
1643 int rc;
1644 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1645 struct bnx2x_mcast_ramrod_params rparam = {0};
1646 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1647
1648 /***************** Cleanup MACs' object first *************************/
1649
 1650 /* Wait for completion of the requested commands */
1651 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1652 /* Perform a dry cleanup */
1653 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1654
1655 /* Clean ETH primary MAC */
1656 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1657 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1658 &ramrod_flags);
1659 if (rc != 0)
1660 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1661
1662 /* Cleanup UC list */
1663 vlan_mac_flags = 0;
1664 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1665 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1666 &ramrod_flags);
1667 if (rc != 0)
1668 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1669
1670 /***************** Now clean mcast object *****************************/
1671 rparam.mcast_obj = &bp->mcast_obj;
1672 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1673
1674 /* Add a DEL command... */
1675 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1676 if (rc < 0)
1677 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1678 "object: %d\n", rc);
1679
1680 /* ...and wait until all pending commands are cleared */
1681 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1682 while (rc != 0) {
1683 if (rc < 0) {
1684 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1685 rc);
1686 return;
1687 }
1688
1689 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1690 }
1691}
1692
1693#ifndef BNX2X_STOP_ON_ERROR
1694#define LOAD_ERROR_EXIT(bp, label) \
1695 do { \
1696 (bp)->state = BNX2X_STATE_ERROR; \
1697 goto label; \
1698 } while (0)
1699#else
1700#define LOAD_ERROR_EXIT(bp, label) \
1701 do { \
1702 (bp)->state = BNX2X_STATE_ERROR; \
1703 (bp)->panic = 1; \
1704 return -EBUSY; \
1705 } while (0)
1706#endif
1707
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001708/* must be called with rtnl_lock */
1709int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1710{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001711 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001712 u32 load_code;
1713 int i, rc;
1714
1715#ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp->panic))
1717 return -EPERM;
1718#endif
1719
1720 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1721
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001722 /* Set the initial link reported state to link down */
1723 bnx2x_acquire_phy_lock(bp);
1724 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1725 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1726 &bp->last_reported_link.link_report_flags);
1727 bnx2x_release_phy_lock(bp);
1728
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001729 /* must be called before memory allocation and HW init */
1730 bnx2x_ilt_set_info(bp);
1731
Ariel Elior6383c0b2011-07-14 08:31:57 +00001732 /*
 1733 * Zero fastpath structures while preserving invariants like the napi
 1734 * struct (allocated only once), the fp index, max_cos and the bp pointer.
1735 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001736 */
1737 for_each_queue(bp, i)
1738 bnx2x_bz_fp(bp, i);
1739
Ariel Elior6383c0b2011-07-14 08:31:57 +00001740
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001741 /* Set the receive queues buffer size */
1742 bnx2x_set_rx_buf_size(bp);
1743
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001744 if (bnx2x_alloc_mem(bp))
1745 return -ENOMEM;
1746
1747 /* As long as bnx2x_alloc_mem() may possibly update
1748 * bp->num_queues, bnx2x_set_real_num_queues() should always
1749 * come after it.
1750 */
1751 rc = bnx2x_set_real_num_queues(bp);
1752 if (rc) {
1753 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001754 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001755 }
1756
Ariel Elior6383c0b2011-07-14 08:31:57 +00001757 /* configure multi-CoS mappings in the kernel.
 1758 * this configuration may be overridden by a multi-class queue discipline
 1759 * or by a DCBX negotiation result.
1760 */
1761 bnx2x_setup_tc(bp->dev, bp->max_cos);
1762
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001763 bnx2x_napi_enable(bp);
1764
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001765 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001766 * Returns the type of LOAD command:
 1767 * if it is the first port to be initialized,
1768 * common blocks should be initialized, otherwise - not
1769 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001770 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001771 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001772 if (!load_code) {
1773 BNX2X_ERR("MCP response failure, aborting\n");
1774 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001775 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001776 }
1777 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1778 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001779 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001780 }
1781
1782 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001783 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001784
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001785 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1786 path, load_count[path][0], load_count[path][1],
1787 load_count[path][2]);
1788 load_count[path][0]++;
1789 load_count[path][1 + port]++;
1790 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1791 path, load_count[path][0], load_count[path][1],
1792 load_count[path][2]);
1793 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001794 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001795 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001796 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1797 else
1798 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1799 }
1800
1801 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001802 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001803 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001804 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001805 /*
1806 * We need the barrier to ensure the ordering between the
1807 * writing to bp->port.pmf here and reading it from the
1808 * bnx2x_periodic_task().
1809 */
1810 smp_mb();
1811 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1812 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001813 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001814
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001815 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1816
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001817 /* Init Function state controlling object */
1818 bnx2x__init_func_obj(bp);
1819
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001820 /* Initialize HW */
1821 rc = bnx2x_init_hw(bp, load_code);
1822 if (rc) {
1823 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001824 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001825 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001826 }
1827
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001828 /* Connect to IRQs */
1829 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001830 if (rc) {
1831 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001832 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001833 }
1834
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001835 /* Setup NIC internals and enable interrupts */
1836 bnx2x_nic_init(bp, load_code);
1837
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001838 /* Init per-function objects */
1839 bnx2x_init_bp_objs(bp);
1840
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001841 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1842 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001843 (bp->common.shmem2_base)) {
1844 if (SHMEM2_HAS(bp, dcc_support))
1845 SHMEM2_WR(bp, dcc_support,
1846 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1847 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1848 }
1849
1850 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1851 rc = bnx2x_func_start(bp);
1852 if (rc) {
1853 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00001854 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001855 LOAD_ERROR_EXIT(bp, load_error3);
1856 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001857
1858 /* Send LOAD_DONE command to MCP */
1859 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001860 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001861 if (!load_code) {
1862 BNX2X_ERR("MCP response failure, aborting\n");
1863 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001864 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001865 }
1866 }
1867
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001868 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001869 if (rc) {
1870 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001871 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001872 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001873
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001874#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001875 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001876 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001877#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001878
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001879 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001880 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001881 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001882 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001883 }
1884
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001885 rc = bnx2x_init_rss_pf(bp);
1886 if (rc)
1887 LOAD_ERROR_EXIT(bp, load_error4);
1888
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001889 /* Now when Clients are configured we are ready to work */
1890 bp->state = BNX2X_STATE_OPEN;
1891
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001892 /* Configure a ucast MAC */
1893 rc = bnx2x_set_eth_mac(bp, true);
1894 if (rc)
1895 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001896
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001897 if (bp->pending_max) {
1898 bnx2x_update_max_mf_config(bp, bp->pending_max);
1899 bp->pending_max = 0;
1900 }
1901
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001902 if (bp->port.pmf)
1903 bnx2x_initial_phy_init(bp, load_mode);
1904
1905 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001906
1907 /* Initialize Rx filter. */
1908 netif_addr_lock_bh(bp->dev);
1909 bnx2x_set_rx_mode(bp->dev);
1910 netif_addr_unlock_bh(bp->dev);
1911
1912 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001913 switch (load_mode) {
1914 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001915 /* Tx queues should only be re-enabled */
1916 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001917 break;
1918
1919 case LOAD_OPEN:
1920 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001921 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001922 break;
1923
1924 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001925 bp->state = BNX2X_STATE_DIAG;
1926 break;
1927
1928 default:
1929 break;
1930 }
1931
Dmitry Kravkov00253a82011-11-13 04:34:25 +00001932 if (bp->port.pmf)
1933 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
1934 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001935 bnx2x__link_status_update(bp);
1936
1937 /* start the timer */
1938 mod_timer(&bp->timer, jiffies + bp->current_interval);
1939
1940#ifdef BCM_CNIC
Dmitry Kravkovb306f5e2011-11-13 04:34:24 +00001941 /* re-read iscsi info */
1942 bnx2x_get_iscsi_info(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001943 bnx2x_setup_cnic_irq_info(bp);
1944 if (bp->state == BNX2X_STATE_OPEN)
1945 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1946#endif
1947 bnx2x_inc_load_cnt(bp);
1948
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001949 /* Wait for all pending SP commands to complete */
1950 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1951 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1952 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1953 return -EBUSY;
1954 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001955
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001956 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001957 return 0;
1958
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001959#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001960load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001961#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001962 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001963 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001964#endif
1965load_error3:
1966 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001967
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001968 /* Clean queueable objects */
1969 bnx2x_squeeze_objects(bp);
1970
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001971 /* Free SKBs, SGEs, TPA pool and driver internals */
1972 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001973 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001974 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001975
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001976 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001977 bnx2x_free_irq(bp);
1978load_error2:
1979 if (!BP_NOMCP(bp)) {
1980 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1981 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1982 }
1983
1984 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001985load_error1:
1986 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001987load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001988 bnx2x_free_mem(bp);
1989
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001990 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001991#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001992}
1993
1994/* must be called with rtnl_lock */
1995int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1996{
1997 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001998 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001999
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002000 if ((bp->state == BNX2X_STATE_CLOSED) ||
2001 (bp->state == BNX2X_STATE_ERROR)) {
2002 /* We can get here if the driver has been unloaded
2003 * during parity error recovery and is either waiting for a
2004 * leader to complete or for other functions to unload and
2005 * then ifdown has been issued. In this case we want to
 2006 * unload and let other functions complete a recovery
2007 * process.
2008 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002009 bp->recovery_state = BNX2X_RECOVERY_DONE;
2010 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002011 bnx2x_release_leader_lock(bp);
2012 smp_mb();
2013
2014 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002015
2016 return -EINVAL;
2017 }
2018
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002019 /*
 2020 * It's important to set bp->state to a value different from
2021 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2022 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2023 */
2024 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2025 smp_mb();
2026
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002027 /* Stop Tx */
2028 bnx2x_tx_disable(bp);
2029
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002030#ifdef BCM_CNIC
2031 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2032#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002033
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002034 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002035
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002036 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002037
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002038 /* Set ALWAYS_ALIVE bit in shmem */
2039 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2040
2041 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002042
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002043 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002044
2045 /* Cleanup the chip if needed */
2046 if (unload_mode != UNLOAD_RECOVERY)
2047 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002048 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002049 /* Send the UNLOAD_REQUEST to the MCP */
2050 bnx2x_send_unload_req(bp, unload_mode);
2051
2052 /*
2053 * Prevent transactions to host from the functions on the
2054 * engine that doesn't reset global blocks in case of global
 2055 * attention once global blocks are reset and gates are opened
 2056 * (the engine whose leader will perform the recovery
2057 * last).
2058 */
2059 if (!CHIP_IS_E1x(bp))
2060 bnx2x_pf_disable(bp);
2061
2062 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002063 bnx2x_netif_stop(bp, 1);
2064
2065 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002066 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002067
2068 /* Report UNLOAD_DONE to MCP */
2069 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002070 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002071
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002072 /*
 2073 * At this stage no more interrupts will arrive, so we may safely clean
2074 * the queueable objects here in case they failed to get cleaned so far.
2075 */
2076 bnx2x_squeeze_objects(bp);
2077
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002078 /* There should be no more pending SP commands at this stage */
2079 bp->sp_state = 0;
2080
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002081 bp->port.pmf = 0;
2082
2083 /* Free SKBs, SGEs, TPA pool and driver internals */
2084 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002085 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002086 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002087
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002088 bnx2x_free_mem(bp);
2089
2090 bp->state = BNX2X_STATE_CLOSED;
2091
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002092 /* Check if there are pending parity attentions. If there are - set
2093 * RECOVERY_IN_PROGRESS.
2094 */
2095 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2096 bnx2x_set_reset_in_progress(bp);
2097
2098 /* Set RESET_IS_GLOBAL if needed */
2099 if (global)
2100 bnx2x_set_reset_global(bp);
2101 }
2102
2103
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002104 /* The last driver must disable a "close the gate" if there is no
2105 * parity attention or "process kill" pending.
2106 */
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002107 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002108 bnx2x_disable_close_the_gate(bp);
2109
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002110 return 0;
2111}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002112
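/**
 * bnx2x_set_power_state - move the device to D0 or D3hot via the PCI PM registers.
 *
 * @bp:		driver handle
 * @state:	PCI_D0 or PCI_D3hot
 *
 * D3hot is skipped while other clients still hold the device or on
 * emulation/FPGA; PME is armed there when WoL is requested.
 */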
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002113int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2114{
2115 u16 pmcsr;
2116
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002117 /* If there is no power capability, silently succeed */
2118 if (!bp->pm_cap) {
2119 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2120 return 0;
2121 }
2122
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002123 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2124
2125 switch (state) {
2126 case PCI_D0:
2127 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2128 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2129 PCI_PM_CTRL_PME_STATUS));
2130
2131 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2132 /* delay required during transition out of D3hot */
2133 msleep(20);
2134 break;
2135
2136 case PCI_D3hot:
 2137 /* If there are other clients above, don't
2138 shut down the power */
2139 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2140 return 0;
2141 /* Don't shut down the power for emulation and FPGA */
2142 if (CHIP_REV_IS_SLOW(bp))
2143 return 0;
2144
2145 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2146 pmcsr |= 3;
2147
2148 if (bp->wol)
2149 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2150
2151 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2152 pmcsr);
2153
2154 /* No more memory access after this point until
2155 * device is brought back to D0.
2156 */
2157 break;
2158
2159 default:
2160 return -EINVAL;
2161 }
2162 return 0;
2163}
2164
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002165/*
2166 * net_device service functions
2167 */
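/**
 * bnx2x_poll - NAPI callback: service Tx completions and Rx work.
 *
 * NAPI is completed and the status block interrupt re-enabled only after
 * re-checking (under rmb()) that no new work has arrived.
 */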
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002168int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002169{
2170 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002171 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002172 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2173 napi);
2174 struct bnx2x *bp = fp->bp;
2175
2176 while (1) {
2177#ifdef BNX2X_STOP_ON_ERROR
2178 if (unlikely(bp->panic)) {
2179 napi_complete(napi);
2180 return 0;
2181 }
2182#endif
2183
Ariel Elior6383c0b2011-07-14 08:31:57 +00002184 for_each_cos_in_tx_queue(fp, cos)
2185 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2186 bnx2x_tx_int(bp, &fp->txdata[cos]);
2187
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002188
2189 if (bnx2x_has_rx_work(fp)) {
2190 work_done += bnx2x_rx_int(fp, budget - work_done);
2191
2192 /* must not complete if we consumed full budget */
2193 if (work_done >= budget)
2194 break;
2195 }
2196
2197 /* Fall out from the NAPI loop if needed */
2198 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002199#ifdef BCM_CNIC
2200 /* No need to update SB for FCoE L2 ring as long as
2201 * it's connected to the default SB and the SB
2202 * has been updated when NAPI was scheduled.
2203 */
2204 if (IS_FCOE_FP(fp)) {
2205 napi_complete(napi);
2206 break;
2207 }
2208#endif
2209
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002210 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002211 /* bnx2x_has_rx_work() reads the status block,
2212 * thus we need to ensure that status block indices
2213 * have been actually read (bnx2x_update_fpsb_idx)
2214 * prior to this check (bnx2x_has_rx_work) so that
2215 * we won't write the "newer" value of the status block
2216 * to IGU (if there was a DMA right after
2217 * bnx2x_has_rx_work and if there is no rmb, the memory
2218 * reading (bnx2x_update_fpsb_idx) may be postponed
2219 * to right before bnx2x_ack_sb). In this case there
2220 * will never be another interrupt until there is
2221 * another update of the status block, while there
2222 * is still unhandled work.
2223 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002224 rmb();
2225
2226 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2227 napi_complete(napi);
2228 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002229 DP(NETIF_MSG_HW,
2230 "Update index to %d\n", fp->fp_hc_idx);
2231 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2232 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002233 IGU_INT_ENABLE, 1);
2234 break;
2235 }
2236 }
2237 }
2238
2239 return work_done;
2240}
2241
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002242/* we split the first BD into headers and data BDs
 2243 * to ease the pain of our fellow microcode engineers;
 2244 * we use one mapping for both BDs.
 2245 * So far this has only been observed to happen
 2246 * in Other Operating Systems(TM).
2247 */
2248static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002249 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002250 struct sw_tx_bd *tx_buf,
2251 struct eth_tx_start_bd **tx_bd, u16 hlen,
2252 u16 bd_prod, int nbd)
2253{
2254 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2255 struct eth_tx_bd *d_tx_bd;
2256 dma_addr_t mapping;
2257 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2258
2259 /* first fix first BD */
2260 h_tx_bd->nbd = cpu_to_le16(nbd);
2261 h_tx_bd->nbytes = cpu_to_le16(hlen);
2262
2263 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2264 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2265 h_tx_bd->addr_lo, h_tx_bd->nbd);
2266
2267 /* now get a new data BD
2268 * (after the pbd) and fill it */
2269 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002270 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002271
2272 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2273 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2274
2275 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2276 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2277 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2278
2279 /* this marks the BD as one that has no individual mapping */
2280 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2281
2282 DP(NETIF_MSG_TX_QUEUED,
2283 "TSO split data size is %d (%x:%x)\n",
2284 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2285
2286 /* update tx_bd */
2287 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2288
2289 return bd_prod;
2290}
2291
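/* Adjust a checksum to exclude the 'fix' bytes preceding the transport header
 * (fix > 0) or include the -fix bytes following it (fix < 0), then fold and
 * byte-swap the result.
 */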
2292static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2293{
2294 if (fix > 0)
2295 csum = (u16) ~csum_fold(csum_sub(csum,
2296 csum_partial(t_header - fix, fix, 0)));
2297
2298 else if (fix < 0)
2299 csum = (u16) ~csum_fold(csum_add(csum,
2300 csum_partial(t_header, -fix, 0)));
2301
2302 return swab16(csum);
2303}
2304
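/* Classify an skb into XMIT_* flags: plain, IPv4/IPv6 checksum, TCP checksum, GSO */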
2305static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2306{
2307 u32 rc;
2308
2309 if (skb->ip_summed != CHECKSUM_PARTIAL)
2310 rc = XMIT_PLAIN;
2311
2312 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002313 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002314 rc = XMIT_CSUM_V6;
2315 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2316 rc |= XMIT_CSUM_TCP;
2317
2318 } else {
2319 rc = XMIT_CSUM_V4;
2320 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2321 rc |= XMIT_CSUM_TCP;
2322 }
2323 }
2324
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002325 if (skb_is_gso_v6(skb))
2326 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2327 else if (skb_is_gso(skb))
2328 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002329
2330 return rc;
2331}
2332
2333#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2334/* check if packet requires linearization (packet is too fragmented)
2335 no need to check fragmentation if page size > 8K (there will be no
 2336 violation of FW restrictions) */
2337static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2338 u32 xmit_type)
2339{
2340 int to_copy = 0;
2341 int hlen = 0;
2342 int first_bd_sz = 0;
2343
2344 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2345 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2346
2347 if (xmit_type & XMIT_GSO) {
2348 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2349 /* Check if LSO packet needs to be copied:
2350 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2351 int wnd_size = MAX_FETCH_BD - 3;
2352 /* Number of windows to check */
2353 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2354 int wnd_idx = 0;
2355 int frag_idx = 0;
2356 u32 wnd_sum = 0;
2357
2358 /* Headers length */
2359 hlen = (int)(skb_transport_header(skb) - skb->data) +
2360 tcp_hdrlen(skb);
2361
2362 /* Amount of data (w/o headers) on linear part of SKB*/
2363 first_bd_sz = skb_headlen(skb) - hlen;
2364
2365 wnd_sum = first_bd_sz;
2366
2367 /* Calculate the first sum - it's special */
2368 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2369 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002370 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002371
2372 /* If there was data on linear skb data - check it */
2373 if (first_bd_sz > 0) {
2374 if (unlikely(wnd_sum < lso_mss)) {
2375 to_copy = 1;
2376 goto exit_lbl;
2377 }
2378
2379 wnd_sum -= first_bd_sz;
2380 }
2381
2382 /* Others are easier: run through the frag list and
2383 check all windows */
2384 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2385 wnd_sum +=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002386 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002387
2388 if (unlikely(wnd_sum < lso_mss)) {
2389 to_copy = 1;
2390 break;
2391 }
2392 wnd_sum -=
Eric Dumazet9e903e02011-10-18 21:00:24 +00002393 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002394 }
2395 } else {
 2396 /* in the non-LSO case a too fragmented packet should always
 2397 be linearized */
2398 to_copy = 1;
2399 }
2400 }
2401
2402exit_lbl:
2403 if (unlikely(to_copy))
2404 DP(NETIF_MSG_TX_QUEUED,
2405 "Linearization IS REQUIRED for %s packet. "
2406 "num_frags %d hlen %d first_bd_sz %d\n",
2407 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2408 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2409
2410 return to_copy;
2411}
2412#endif
2413
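/**
 * bnx2x_set_pbd_gso_e2 - update the parsing data for a GSO packet.
 *
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:	xmit flags
 *
 * 57712 and newer
 */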
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002414static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2415 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002416{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002417 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2418 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2419 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002420 if ((xmit_type & XMIT_GSO_V6) &&
2421 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002422 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002423}
2424
2425/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002426 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002427 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002428 * @skb: packet skb
2429 * @pbd: parse BD
2430 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002431 */
2432static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2433 struct eth_tx_parse_bd_e1x *pbd,
2434 u32 xmit_type)
2435{
2436 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2437 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2438 pbd->tcp_flags = pbd_tcp_flags(skb);
2439
2440 if (xmit_type & XMIT_GSO_V4) {
2441 pbd->ip_id = swab16(ip_hdr(skb)->id);
2442 pbd->tcp_pseudo_csum =
2443 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2444 ip_hdr(skb)->daddr,
2445 0, IPPROTO_TCP, 0));
2446
2447 } else
2448 pbd->tcp_pseudo_csum =
2449 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2450 &ipv6_hdr(skb)->daddr,
2451 0, IPPROTO_TCP, 0));
2452
2453 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2454}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002455
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002456/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002457 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002458 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002459 * @bp: driver handle
2460 * @skb: packet skb
2461 * @parsing_data: data to be updated
2462 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002463 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002464 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002465 */
2466static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002467 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002468{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002469 *parsing_data |=
2470 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2471 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2472 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002473
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002474 if (xmit_type & XMIT_CSUM_TCP) {
2475 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2476 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2477 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002478
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002479 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2480 } else
2481 /* We support checksum offload for TCP and UDP only.
2482 * No need to pass the UDP header length - it's a constant.
2483 */
2484 return skb_transport_header(skb) +
2485 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002486}
2487
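/* Set the L4 and IP checksum offload flags in the start BD according to xmit_type */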
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002488static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2489 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2490{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002491 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2492
2493 if (xmit_type & XMIT_CSUM_V4)
2494 tx_start_bd->bd_flags.as_bitfield |=
2495 ETH_TX_BD_FLAGS_IP_CSUM;
2496 else
2497 tx_start_bd->bd_flags.as_bitfield |=
2498 ETH_TX_BD_FLAGS_IPV6;
2499
2500 if (!(xmit_type & XMIT_CSUM_TCP))
2501 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002502}
2503
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002504/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002505 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002506 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002507 * @bp: driver handle
2508 * @skb: packet skb
2509 * @pbd: parse BD to be updated
2510 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002511 */
2512static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2513 struct eth_tx_parse_bd_e1x *pbd,
2514 u32 xmit_type)
2515{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002516 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002517
2518 /* for now NS flag is not used in Linux */
2519 pbd->global_data =
2520 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2521 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2522
2523 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002524 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002525
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002526 hlen += pbd->ip_hlen_w;
2527
2528 /* We support checksum offload for TCP and UDP only */
2529 if (xmit_type & XMIT_CSUM_TCP)
2530 hlen += tcp_hdrlen(skb) / 2;
2531 else
2532 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002533
2534 pbd->total_hlen_w = cpu_to_le16(hlen);
2535 hlen = hlen*2;
2536
2537 if (xmit_type & XMIT_CSUM_TCP) {
2538 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2539
2540 } else {
2541 s8 fix = SKB_CS_OFF(skb); /* signed! */
2542
2543 DP(NETIF_MSG_TX_QUEUED,
2544 "hlen %d fix %d csum before fix %x\n",
2545 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2546
2547 /* HW bug: fixup the CSUM */
2548 pbd->tcp_pseudo_csum =
2549 bnx2x_csum_fix(skb_transport_header(skb),
2550 SKB_CS(skb), fix);
2551
2552 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2553 pbd->tcp_pseudo_csum);
2554 }
2555
2556 return hlen;
2557}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002558
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002559/* called with netif_tx_lock
2560 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2561 * netif_wake_queue()
2562 */
2563netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2564{
2565 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002566
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002567 struct bnx2x_fastpath *fp;
2568 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002569 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002570 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002571 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002572 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002573 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002574 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002575 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002576 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002577 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002578 dma_addr_t mapping;
2579 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2580 int i;
2581 u8 hlen = 0;
2582 __le16 pkt_size = 0;
2583 struct ethhdr *eth;
2584 u8 mac_type = UNICAST_ADDRESS;
2585
2586#ifdef BNX2X_STOP_ON_ERROR
2587 if (unlikely(bp->panic))
2588 return NETDEV_TX_BUSY;
2589#endif
2590
Ariel Elior6383c0b2011-07-14 08:31:57 +00002591 txq_index = skb_get_queue_mapping(skb);
2592 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002593
Ariel Elior6383c0b2011-07-14 08:31:57 +00002594 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2595
2596 /* decode the fastpath index and the cos index from the txq */
2597 fp_index = TXQ_TO_FP(txq_index);
2598 txdata_index = TXQ_TO_COS(txq_index);
2599
2600#ifdef BCM_CNIC
2601 /*
2602 * Override the above for the FCoE queue:
2603 * - FCoE fp entry is right after the ETH entries.
2604 * - FCoE L2 queue uses bp->txdata[0] only.
2605 */
2606 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2607 bnx2x_fcoe_tx(bp, txq_index)))) {
2608 fp_index = FCOE_IDX;
2609 txdata_index = 0;
2610 }
2611#endif
2612
2613 /* enable this debug print to view the transmission queue being used
Joe Perches94f05b02011-08-14 12:16:20 +00002614 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002615 txq_index, fp_index, txdata_index); */
2616
2617 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002618 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002619 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002620
Ariel Elior6383c0b2011-07-14 08:31:57 +00002621 /* enable this debug print to view the transmission details
2622 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
Joe Perches94f05b02011-08-14 12:16:20 +00002623 " tx_data ptr %p fp pointer %p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002624 txdata->cid, fp_index, txdata_index, txdata, fp); */
2625
2626 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2627 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002628 fp->eth_q_stats.driver_xoff++;
2629 netif_tx_stop_queue(txq);
2630 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2631 return NETDEV_TX_BUSY;
2632 }
2633
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002634 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2635 "protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002636 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002637 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2638
2639 eth = (struct ethhdr *)skb->data;
2640
2641 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2642 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2643 if (is_broadcast_ether_addr(eth->h_dest))
2644 mac_type = BROADCAST_ADDRESS;
2645 else
2646 mac_type = MULTICAST_ADDRESS;
2647 }
2648
2649#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2650 /* First, check if we need to linearize the skb (due to FW
2651 restrictions). No need to check fragmentation if page size > 8K
 2652 (there will be no violation of FW restrictions) */
2653 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2654 /* Statistics of linearization */
2655 bp->lin_cnt++;
2656 if (skb_linearize(skb) != 0) {
2657 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2658 "silently dropping this SKB\n");
2659 dev_kfree_skb_any(skb);
2660 return NETDEV_TX_OK;
2661 }
2662 }
2663#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002664 /* Map skb linear data for DMA */
2665 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2666 skb_headlen(skb), DMA_TO_DEVICE);
2667 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2668 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2669 "silently dropping this SKB\n");
2670 dev_kfree_skb_any(skb);
2671 return NETDEV_TX_OK;
2672 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002673 /*
2674 Please read carefully. First we use one BD which we mark as start,
2675 then we have a parsing info BD (used for TSO or xsum),
2676 and only then we have the rest of the TSO BDs.
2677 (don't forget to mark the last one as last,
2678 and to unmap only AFTER you write to the BD ...)
 2679 And above all, all PBD sizes are in words - NOT DWORDS!
2680 */
2681
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002682 /* get current pkt produced now - advance it just before sending packet
2683 * since mapping of pages may fail and cause packet to be dropped
2684 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002685 pkt_prod = txdata->tx_pkt_prod;
2686 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002687
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002688 /* get a tx_buf and first BD
2689 * tx_start_bd may be changed during SPLIT,
2690 * but first_bd will always stay first
2691 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002692 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2693 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002694 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002695
2696 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002697 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2698 mac_type);
2699
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002700 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002701 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002702
2703 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002704 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002705 tx_buf->skb = skb;
2706 tx_buf->flags = 0;
2707
2708 DP(NETIF_MSG_TX_QUEUED,
2709 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002710 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002711
Jesse Grosseab6d182010-10-20 13:56:03 +00002712 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002713 tx_start_bd->vlan_or_ethertype =
2714 cpu_to_le16(vlan_tx_tag_get(skb));
2715 tx_start_bd->bd_flags.as_bitfield |=
2716 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002717 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002718 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002719
2720 /* turn on parsing and get a BD */
2721 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002722
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002723 if (xmit_type & XMIT_CSUM)
2724 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002725
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002726 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002727 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002728 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2729 /* Set PBD in checksum offload case */
2730 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002731 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2732 &pbd_e2_parsing_data,
2733 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002734 if (IS_MF_SI(bp)) {
2735 /*
2736 * fill in the MAC addresses in the PBD - for local
2737 * switching
2738 */
2739 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2740 &pbd_e2->src_mac_addr_mid,
2741 &pbd_e2->src_mac_addr_lo,
2742 eth->h_source);
2743 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2744 &pbd_e2->dst_mac_addr_mid,
2745 &pbd_e2->dst_mac_addr_lo,
2746 eth->h_dest);
2747 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002748 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002749 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002750 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2751 /* Set PBD in checksum offload case */
2752 if (xmit_type & XMIT_CSUM)
2753 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002754
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002755 }
2756
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002757 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002758 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2759 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002760 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002761 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2762 pkt_size = tx_start_bd->nbytes;
2763
2764 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2765 " nbytes %d flags %x vlan %x\n",
2766 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2767 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002768 tx_start_bd->bd_flags.as_bitfield,
2769 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002770
2771 if (xmit_type & XMIT_GSO) {
2772
2773 DP(NETIF_MSG_TX_QUEUED,
2774 "TSO packet len %d hlen %d total len %d tso size %d\n",
2775 skb->len, hlen, skb_headlen(skb),
2776 skb_shinfo(skb)->gso_size);
2777
2778 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2779
2780 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00002781 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2782 &tx_start_bd, hlen,
2783 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002784 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002785 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2786 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002787 else
2788 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002789 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002790
2791 /* Set the PBD's parsing_data field if not zero
2792 * (for the chips newer than 57711).
2793 */
2794 if (pbd_e2_parsing_data)
2795 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2796
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002797 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2798
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002799 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002800 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2801 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2802
Eric Dumazet9e903e02011-10-18 21:00:24 +00002803 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2804 skb_frag_size(frag), DMA_TO_DEVICE);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002805 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2806
2807 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2808 "dropping packet...\n");
2809
2810 /* we need to unmap all buffers already mapped
2811 * for this SKB;
2812 * first_bd->nbd needs to be properly updated
2813 * before the call to bnx2x_free_tx_pkt
2814 */
2815 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002816 bnx2x_free_tx_pkt(bp, txdata,
2817 TX_BD(txdata->tx_pkt_prod));
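			/* the already-mapped BDs are unwound above; report
			 * NETDEV_TX_OK so the stack does not retry this SKB
			 */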
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002818 return NETDEV_TX_OK;
2819 }
2820
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002821 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002822 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002823 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00002824 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002825
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002826 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2827 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eric Dumazet9e903e02011-10-18 21:00:24 +00002828 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2829 le16_add_cpu(&pkt_size, skb_frag_size(frag));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002830 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002831
2832 DP(NETIF_MSG_TX_QUEUED,
2833 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2834 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2835 le16_to_cpu(tx_data_bd->nbytes));
2836 }
2837
2838 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2839
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002840 /* update with actual num BDs */
2841 first_bd->nbd = cpu_to_le16(nbd);
2842
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002843 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2844
2845 /* now send a tx doorbell, counting the next BD
2846 * if the packet contains or ends with it
2847 */
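	/* a producer page offset smaller than nbd means the packet wrapped
	 * past the end of a BD page and consumed a "next page" BD as well
	 */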
2848 if (TX_BD_POFF(bd_prod) < nbd)
2849 nbd++;
2850
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002851 /* total_pkt_bytes should be set on the first data BD if
2852 * it's not an LSO packet and there is more than one
2853 * data BD. In this case pkt_size is limited by an MTU value.
2854 * However we prefer to set it for an LSO packet (while we don't
2855 * have to) in order to save some CPU cycles in the non-LSO
2856 * case, where we care much more about them.
2857 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002858 if (total_pkt_bd != NULL)
2859 total_pkt_bd->total_pkt_bytes = pkt_size;
2860
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002861 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002862 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002863 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002865 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2866 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2867 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2868 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002869 if (pbd_e2)
2870 DP(NETIF_MSG_TX_QUEUED,
2871 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2872 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2873 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2874 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2875 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002876 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2877
Ariel Elior6383c0b2011-07-14 08:31:57 +00002878 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002879 /*
2880 * Make sure that the BD data is updated before updating the producer
2881 * since FW might read the BD right after the producer is updated.
2882 * This is only applicable for weak-ordered memory model archs such
2883 * as IA-64. The following barrier is also mandatory since the FW
2884 * assumes packets must have BDs.
2885 */
2886 wmb();
2887
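	/* publish the new BD producer to the chip: update the doorbell data
	 * (its raw form is written to the doorbell register below), with
	 * barrier() keeping the compiler from reordering that update past
	 * the DOORBELL() write and mmiowb() ordering the MMIO write itself
	 * on architectures that need it
	 */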
Ariel Elior6383c0b2011-07-14 08:31:57 +00002888 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002889 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002890
Ariel Elior6383c0b2011-07-14 08:31:57 +00002891 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002892
2893 mmiowb();
2894
Ariel Elior6383c0b2011-07-14 08:31:57 +00002895 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002896
Ariel Elior6383c0b2011-07-14 08:31:57 +00002897 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002898 netif_tx_stop_queue(txq);
2899
2900 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
2901 * the ordering of set_bit() in netif_tx_stop_queue() and the read
2902 * of txdata->tx_bd_cons */
2903 smp_mb();
2904
2905 fp->eth_q_stats.driver_xoff++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002906 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002907 netif_tx_wake_queue(txq);
2908 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002909 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002910
2911 return NETDEV_TX_OK;
2912}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002913
Ariel Elior6383c0b2011-07-14 08:31:57 +00002914/**
2915 * bnx2x_setup_tc - routine to configure net_device for multi tc
2916 *
2917 * @dev: net device to configure
2918 * @num_tc: number of traffic classes to enable
2919 *
2920 * callback connected to the ndo_setup_tc function pointer
2921 */
2922int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2923{
2924 int cos, prio, count, offset;
2925 struct bnx2x *bp = netdev_priv(dev);
2926
2927 /* setup tc must be called under rtnl lock */
2928 ASSERT_RTNL();
2929
2930 /* no traffic classes requested; aborting */
2931 if (!num_tc) {
2932 netdev_reset_tc(dev);
2933 return 0;
2934 }
2935
2936 /* requested to support too many traffic classes */
2937 if (num_tc > bp->max_cos) {
2938 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
Joe Perches94f05b02011-08-14 12:16:20 +00002939 " requested: %d. max supported is %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002940 num_tc, bp->max_cos);
2941 return -EINVAL;
2942 }
2943
2944 /* declare amount of supported traffic classes */
2945 if (netdev_set_num_tc(dev, num_tc)) {
Joe Perches94f05b02011-08-14 12:16:20 +00002946 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002947 num_tc);
2948 return -EINVAL;
2949 }
2950
2951 /* configure priority to traffic class mapping */
2952 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2953 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
Joe Perches94f05b02011-08-14 12:16:20 +00002954 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002955 prio, bp->prio_to_cos[prio]);
2956 }
2957
2958
2959 /* Use this configuration to diffrentiate tc0 from other COSes
2960 This can be used for ets or pfc, and save the effort of setting
2961 up a multio class queue disc or negotiating DCBX with a switch
2962 netdev_set_prio_tc_map(dev, 0, 0);
Joe Perches94f05b02011-08-14 12:16:20 +00002963 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002964 for (prio = 1; prio < 16; prio++) {
2965 netdev_set_prio_tc_map(dev, prio, 1);
Joe Perches94f05b02011-08-14 12:16:20 +00002966 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002967 } */
2968
2969 /* configure traffic class to transmission queue mapping */
2970 for (cos = 0; cos < bp->max_cos; cos++) {
2971 count = BNX2X_NUM_ETH_QUEUES(bp);
2972 offset = cos * MAX_TXQS_PER_COS;
2973 netdev_set_tc_queue(dev, cos, count, offset);
Joe Perches94f05b02011-08-14 12:16:20 +00002974 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002975 cos, offset, count);
2976 }
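	/* For example, assuming hypothetically that MAX_TXQS_PER_COS is 16
	 * and BNX2X_NUM_ETH_QUEUES(bp) is 8, the resulting mapping would be:
	 * tc0 -> netdev tx queues [0..7], tc1 -> [16..23], tc2 -> [32..39].
	 */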
2977
2978 return 0;
2979}
2980
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002981/* called with rtnl_lock */
2982int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2983{
2984 struct sockaddr *addr = p;
2985 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002986 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002987
2988 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2989 return -EINVAL;
2990
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002991 if (netif_running(dev)) {
2992 rc = bnx2x_set_eth_mac(bp, false);
2993 if (rc)
2994 return rc;
2995 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002996
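	/* the currently configured MAC (if the device was running) has been
	 * removed above; store the new address and re-configure it below
	 */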
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002997 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2998
2999 if (netif_running(dev))
3000 rc = bnx2x_set_eth_mac(bp, true);
3001
3002 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003003}
3004
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003005static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3006{
3007 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3008 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003009 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003010
3011 /* Common */
3012#ifdef BCM_CNIC
3013 if (IS_FCOE_IDX(fp_index)) {
3014 memset(sb, 0, sizeof(union host_hc_status_block));
3015 fp->status_blk_mapping = 0;
3016
3017 } else {
3018#endif
3019 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003020 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003021 BNX2X_PCI_FREE(sb->e2_sb,
3022 bnx2x_fp(bp, fp_index,
3023 status_blk_mapping),
3024 sizeof(struct host_hc_status_block_e2));
3025 else
3026 BNX2X_PCI_FREE(sb->e1x_sb,
3027 bnx2x_fp(bp, fp_index,
3028 status_blk_mapping),
3029 sizeof(struct host_hc_status_block_e1x));
3030#ifdef BCM_CNIC
3031 }
3032#endif
3033 /* Rx */
3034 if (!skip_rx_queue(bp, fp_index)) {
3035 bnx2x_free_rx_bds(fp);
3036
3037 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3038 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3039 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3040 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3041 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3042
3043 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3044 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3045 sizeof(struct eth_fast_path_rx_cqe) *
3046 NUM_RCQ_BD);
3047
3048 /* SGE ring */
3049 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3050 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3051 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3052 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3053 }
3054
3055 /* Tx */
3056 if (!skip_tx_queue(bp, fp_index)) {
3057 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003058 for_each_cos_in_tx_queue(fp, cos) {
3059 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3060
3061 DP(BNX2X_MSG_SP,
Joe Perches94f05b02011-08-14 12:16:20 +00003062 "freeing tx memory of fp %d cos %d cid %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003063 fp_index, cos, txdata->cid);
3064
3065 BNX2X_FREE(txdata->tx_buf_ring);
3066 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3067 txdata->tx_desc_mapping,
3068 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3069 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003070 }
3071 /* end of fastpath */
3072}
3073
3074void bnx2x_free_fp_mem(struct bnx2x *bp)
3075{
3076 int i;
3077 for_each_queue(bp, i)
3078 bnx2x_free_fp_mem_at(bp, i);
3079}
3080
3081static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3082{
3083 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003084 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003085 bnx2x_fp(bp, index, sb_index_values) =
3086 (__le16 *)status_blk.e2_sb->sb.index_values;
3087 bnx2x_fp(bp, index, sb_running_index) =
3088 (__le16 *)status_blk.e2_sb->sb.running_index;
3089 } else {
3090 bnx2x_fp(bp, index, sb_index_values) =
3091 (__le16 *)status_blk.e1x_sb->sb.index_values;
3092 bnx2x_fp(bp, index, sb_running_index) =
3093 (__le16 *)status_blk.e1x_sb->sb.running_index;
3094 }
3095}
3096
3097static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3098{
3099 union host_hc_status_block *sb;
3100 struct bnx2x_fastpath *fp = &bp->fp[index];
3101 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003102 u8 cos;
David S. Miller8decf862011-09-22 03:23:13 -04003103 int rx_ring_size = 0;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003104
3105 /* if rx_ring_size was specified - use it, otherwise compute a default */
David S. Miller8decf862011-09-22 03:23:13 -04003106 if (!bp->rx_ring_size) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003107
David S. Miller8decf862011-09-22 03:23:13 -04003108 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3109
3110 /* allocate at least number of buffers required by FW */
3111 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3112 MIN_RX_SIZE_TPA, rx_ring_size);
3113
3114 bp->rx_ring_size = rx_ring_size;
3115 } else
3116 rx_ring_size = bp->rx_ring_size;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003117
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003118 /* Common */
3119 sb = &bnx2x_fp(bp, index, status_blk);
3120#ifdef BCM_CNIC
3121 if (!IS_FCOE_IDX(index)) {
3122#endif
3123 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003124 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003125 BNX2X_PCI_ALLOC(sb->e2_sb,
3126 &bnx2x_fp(bp, index, status_blk_mapping),
3127 sizeof(struct host_hc_status_block_e2));
3128 else
3129 BNX2X_PCI_ALLOC(sb->e1x_sb,
3130 &bnx2x_fp(bp, index, status_blk_mapping),
3131 sizeof(struct host_hc_status_block_e1x));
3132#ifdef BCM_CNIC
3133 }
3134#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003135
3136 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3137 * set shortcuts for it.
3138 */
3139 if (!IS_FCOE_IDX(index))
3140 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003141
3142 /* Tx */
3143 if (!skip_tx_queue(bp, index)) {
3144 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003145 for_each_cos_in_tx_queue(fp, cos) {
3146 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3147
3148 DP(BNX2X_MSG_SP, "allocating tx memory of "
Joe Perches94f05b02011-08-14 12:16:20 +00003149 "fp %d cos %d\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00003150 index, cos);
3151
3152 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003153 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003154 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3155 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003156 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003157 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003158 }
3159
3160 /* Rx */
3161 if (!skip_rx_queue(bp, index)) {
3162 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3163 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3164 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3165 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3166 &bnx2x_fp(bp, index, rx_desc_mapping),
3167 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3168
3169 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3170 &bnx2x_fp(bp, index, rx_comp_mapping),
3171 sizeof(struct eth_fast_path_rx_cqe) *
3172 NUM_RCQ_BD);
3173
3174 /* SGE ring */
3175 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3176 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3177 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3178 &bnx2x_fp(bp, index, rx_sge_mapping),
3179 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3180 /* RX BD ring */
3181 bnx2x_set_next_page_rx_bd(fp);
3182
3183 /* CQ ring */
3184 bnx2x_set_next_page_rx_cq(fp);
3185
3186 /* BDs */
3187 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3188 if (ring_size < rx_ring_size)
3189 goto alloc_mem_err;
3190 }
3191
3192 return 0;
3193
3194/* handles low memory cases */
3195alloc_mem_err:
3196 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3197 index, ring_size);
3198 /* FW will drop all packets if the queue is not big enough.
3199 * In that case we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003200 * The minimum size is different for OOO, TPA and non-TPA queues.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003201 */
3202 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003203 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003204 /* release memory allocated for this queue */
3205 bnx2x_free_fp_mem_at(bp, index);
3206 return -ENOMEM;
3207 }
3208 return 0;
3209}
3210
3211int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3212{
3213 int i;
3214
3215 /**
3216 * 1. Allocate FP for leading - fatal if error
3217 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003218 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3219 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003220 */
3221
3222 /* leading */
3223 if (bnx2x_alloc_fp_mem_at(bp, 0))
3224 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003225
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003226#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003227 if (!NO_FCOE(bp))
3228 /* FCoE */
3229 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3230 /* we will fail load process instead of mark
3231 * NO_FCOE_FLAG
3232 */
3233 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003234#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003235
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003236 /* RSS */
3237 for_each_nondefault_eth_queue(bp, i)
3238 if (bnx2x_alloc_fp_mem_at(bp, i))
3239 break;
3240
3241 /* handle memory failures */
3242 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3243 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3244
3245 WARN_ON(delta < 0);
3246#ifdef BCM_CNIC
3247 /**
3248 * move the non-eth FPs next to the last eth FP;
3249 * this must be done in that order:
3250 * FCOE_IDX < FWD_IDX < OOO_IDX
3251 */
3252
Ariel Elior6383c0b2011-07-14 08:31:57 +00003253 /* move the FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003254 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3255#endif
3256 bp->num_queues -= delta;
3257 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3258 bp->num_queues + delta, bp->num_queues);
3259 }
3260
3261 return 0;
3262}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003263
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003264void bnx2x_free_mem_bp(struct bnx2x *bp)
3265{
3266 kfree(bp->fp);
3267 kfree(bp->msix_table);
3268 kfree(bp->ilt);
3269}
3270
3271int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3272{
3273 struct bnx2x_fastpath *fp;
3274 struct msix_entry *tbl;
3275 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003276 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003277
Ariel Elior6383c0b2011-07-14 08:31:57 +00003278 /*
3279 * The biggest MSI-X table we might need is the maximum number of fast
3280 * path IGU SBs plus the default SB (for the PF).
3281 */
3282 msix_table_size = bp->igu_sb_cnt + 1;
3283
3284 /* fp array: RSS plus CNIC related L2 queues */
3285 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3286 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003287 if (!fp)
3288 goto alloc_err;
3289 bp->fp = fp;
3290
3291 /* msix table */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003292 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003293 if (!tbl)
3294 goto alloc_err;
3295 bp->msix_table = tbl;
3296
3297 /* ilt */
3298 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3299 if (!ilt)
3300 goto alloc_err;
3301 bp->ilt = ilt;
3302
3303 return 0;
3304alloc_err:
3305 bnx2x_free_mem_bp(bp);
3306 return -ENOMEM;
3307
3308}
3309
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003310int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003311{
3312 struct bnx2x *bp = netdev_priv(dev);
3313
3314 if (unlikely(!netif_running(dev)))
3315 return 0;
3316
3317 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3318 return bnx2x_nic_load(bp, LOAD_NORMAL);
3319}
3320
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003321int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3322{
3323 u32 sel_phy_idx = 0;
3324 if (bp->link_params.num_phys <= 1)
3325 return INT_PHY;
3326
3327 if (bp->link_vars.link_up) {
3328 sel_phy_idx = EXT_PHY1;
3329 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3330 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3331 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3332 sel_phy_idx = EXT_PHY2;
3333 } else {
3334
3335 switch (bnx2x_phy_selection(&bp->link_params)) {
3336 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3337 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3338 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3339 sel_phy_idx = EXT_PHY1;
3340 break;
3341 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3342 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3343 sel_phy_idx = EXT_PHY2;
3344 break;
3345 }
3346 }
3347
3348 return sel_phy_idx;
3349
3350}
3351int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3352{
3353 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3354 /*
3355 * The selected active PHY is always the one after swapping (in case PHY
3356 * swapping is enabled). So when swapping is enabled, we need to reverse
3357 * the configuration
3358 */
3359
3360 if (bp->link_params.multi_phy_config &
3361 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3362 if (sel_phy_idx == EXT_PHY1)
3363 sel_phy_idx = EXT_PHY2;
3364 else if (sel_phy_idx == EXT_PHY2)
3365 sel_phy_idx = EXT_PHY1;
3366 }
3367 return LINK_CONFIG_IDX(sel_phy_idx);
3368}
3369
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003370#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3371int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3372{
3373 struct bnx2x *bp = netdev_priv(dev);
3374 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3375
3376 switch (type) {
3377 case NETDEV_FCOE_WWNN:
3378 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3379 cp->fcoe_wwn_node_name_lo);
3380 break;
3381 case NETDEV_FCOE_WWPN:
3382 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3383 cp->fcoe_wwn_port_name_lo);
3384 break;
3385 default:
3386 return -EINVAL;
3387 }
3388
3389 return 0;
3390}
3391#endif
3392
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003393/* called with rtnl_lock */
3394int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3395{
3396 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003397
3398 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Joe Perchesf1deab52011-08-14 12:16:21 +00003399 pr_err("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003400 return -EAGAIN;
3401 }
3402
3403 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3404 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3405 return -EINVAL;
3406
3407 /* This does not race with packet allocation
3408 * because the actual alloc size is
3409 * only updated as part of load
3410 */
3411 dev->mtu = new_mtu;
3412
Michał Mirosław66371c42011-04-12 09:38:23 +00003413 return bnx2x_reload_if_running(dev);
3414}
3415
3416u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3417{
3418 struct bnx2x *bp = netdev_priv(dev);
3419
3420 /* TPA requires Rx CSUM offloading */
3421 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3422 features &= ~NETIF_F_LRO;
3423
3424 return features;
3425}
3426
3427int bnx2x_set_features(struct net_device *dev, u32 features)
3428{
3429 struct bnx2x *bp = netdev_priv(dev);
3430 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003431 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003432
3433 if (features & NETIF_F_LRO)
3434 flags |= TPA_ENABLE_FLAG;
3435 else
3436 flags &= ~TPA_ENABLE_FLAG;
3437
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003438 if (features & NETIF_F_LOOPBACK) {
3439 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3440 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3441 bnx2x_reload = true;
3442 }
3443 } else {
3444 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3445 bp->link_params.loopback_mode = LOOPBACK_NONE;
3446 bnx2x_reload = true;
3447 }
3448 }
3449
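	/* a change in the TPA flags likewise requires a reload for the new
	 * settings to take effect
	 */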
Michał Mirosław66371c42011-04-12 09:38:23 +00003450 if (flags ^ bp->flags) {
3451 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003452 bnx2x_reload = true;
3453 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003454
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003455 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003456 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3457 return bnx2x_reload_if_running(dev);
3458 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003459 }
3460
Michał Mirosław66371c42011-04-12 09:38:23 +00003461 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003462}
3463
3464void bnx2x_tx_timeout(struct net_device *dev)
3465{
3466 struct bnx2x *bp = netdev_priv(dev);
3467
3468#ifdef BNX2X_STOP_ON_ERROR
3469 if (!bp->panic)
3470 bnx2x_panic();
3471#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003472
3473 smp_mb__before_clear_bit();
3474 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3475 smp_mb__after_clear_bit();
3476
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003477 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003478 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003479}
3480
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003481int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3482{
3483 struct net_device *dev = pci_get_drvdata(pdev);
3484 struct bnx2x *bp;
3485
3486 if (!dev) {
3487 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3488 return -ENODEV;
3489 }
3490 bp = netdev_priv(dev);
3491
3492 rtnl_lock();
3493
3494 pci_save_state(pdev);
3495
3496 if (!netif_running(dev)) {
3497 rtnl_unlock();
3498 return 0;
3499 }
3500
3501 netif_device_detach(dev);
3502
3503 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3504
3505 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3506
3507 rtnl_unlock();
3508
3509 return 0;
3510}
3511
3512int bnx2x_resume(struct pci_dev *pdev)
3513{
3514 struct net_device *dev = pci_get_drvdata(pdev);
3515 struct bnx2x *bp;
3516 int rc;
3517
3518 if (!dev) {
3519 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3520 return -ENODEV;
3521 }
3522 bp = netdev_priv(dev);
3523
3524 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Joe Perchesf1deab52011-08-14 12:16:21 +00003525 pr_err("Handling parity error recovery. Try again later\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003526 return -EAGAIN;
3527 }
3528
3529 rtnl_lock();
3530
3531 pci_restore_state(pdev);
3532
3533 if (!netif_running(dev)) {
3534 rtnl_unlock();
3535 return 0;
3536 }
3537
3538 bnx2x_set_power_state(bp, PCI_D0);
3539 netif_device_attach(dev);
3540
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003541 /* Since the chip was reset, clear the FW sequence number */
3542 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003543 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3544
3545 rtnl_unlock();
3546
3547 return rc;
3548}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003549
3550
3551void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3552 u32 cid)
3553{
3554 /* ustorm cxt validation */
3555 cxt->ustorm_ag_context.cdu_usage =
3556 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3557 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3558 /* xcontext validation */
3559 cxt->xstorm_ag_context.cdu_reserved =
3560 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3561 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3562}
3563
3564static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3565 u8 fw_sb_id, u8 sb_index,
3566 u8 ticks)
3567{
3568
3569 u32 addr = BAR_CSTRORM_INTMEM +
3570 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3571 REG_WR8(bp, addr, ticks);
3572 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3573 port, fw_sb_id, sb_index, ticks);
3574}
3575
3576static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3577 u16 fw_sb_id, u8 sb_index,
3578 u8 disable)
3579{
3580 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3581 u32 addr = BAR_CSTRORM_INTMEM +
3582 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3583 u16 flags = REG_RD16(bp, addr);
3584 /* clear and set */
3585 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3586 flags |= enable_flag;
3587 REG_WR16(bp, addr, flags);
3588 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3589 port, fw_sb_id, sb_index, disable);
3590}
3591
3592void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3593 u8 sb_index, u8 disable, u16 usec)
3594{
3595 int port = BP_PORT(bp);
3596 u8 ticks = usec / BNX2X_BTR;
3597
3598 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3599
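	/* a coalescing interval of 0 usec implicitly disables this HC index
	 * as well
	 */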
3600 disable = disable ? 1 : (usec ? 0 : 1);
3601 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3602}