/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

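/*
 * Usage sketch for the macro above (illustrative arguments): a caller
 * setting up a Tx or Rx object typically fills one unmap-queue memory
 * requirement per queue, e.g.
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[i], num_queues, depth);
 *
 * The requested length covers the bnad_unmap_q header plus 'depth'
 * bnad_skb_unmap entries; the struct already embeds the first entry,
 * hence the (depth - 1).
 */
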
#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

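/*
 * DMA-unmap one transmitted skb (linear part plus 'frag' page
 * fragments) starting at unmap-array slot 'index'; returns the index
 * of the next slot to be processed.
 */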
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		   u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;
	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			  skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff	*skb = NULL;
	int		i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16		wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff	*skb;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs from a tasklet that was
	 * scheduled before bnad_cb_tx_cleanup() cleared the
	 * BNAD_TXQ_TX_STARTED bit, but actually executes after
	 * the cleanup has completed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees Tx buffers for all the tcbs in all the Tx objects */
/*
 * Scheduled from the sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked = 0;
	int		i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

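/* Reset the producer/consumer indices of the RCB and its unmap queue */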
static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

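/* Unmap and free every posted Rx buffer, then reset the RCB indices */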
static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

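/*
 * Allocate skbs for the free slots of the RxQ, DMA-map and post them to
 * the hardware; rings the RxQ doorbell if anything was posted. Bails
 * out early (to be retried on a later refill) if skb allocation fails.
 */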
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

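/*
 * Rx completion processing: walk the CQ for up to 'budget' packets,
 * unmap each buffer and hand the skb up the stack (via GRO when the
 * checksum was verified), then ack the IB and refill the RxQ(s).
 * Returns the number of packets processed.
 */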
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else
			netif_receive_skb(skb);

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}

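/* Schedule the CCB's NAPI poll, if it is not already scheduled */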
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

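/*
 * INTx interrupt handler: services the mailbox first, then Tx
 * completions, then schedules NAPI polls for the Rx paths.
 */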
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status))
		return IRQ_NONE;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

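/*
 * Copy the adapter's permanent MAC address into the netdev, and use it
 * as the current address too if none has been assigned yet.
 */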
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule
						 */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}

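/*
 * Restart the TxQs of a Tx: drain stale Tx buffers, reset the unmap
 * queues, mark the queues started, and wake the netdev subqueues.
 */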
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		unmap_q = tcb->unmap_q;

		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
			continue;

		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
			cpu_relax();

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for the first ioceth enable failure, after which
	 * we read back a zero MAC address. Try to fetch the MAC
	 * address again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	mdelay(BNAD_TXRX_SYNC_MDELAY);
	bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	mdelay(BNAD_TXRX_SYNC_MDELAY);

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}

	bna_rx_cleanup_complete(rx);
}

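/*
 * Reinitialize the CQ completions and, for each RCB of the Rx, free any
 * stale buffers, mark the RxQ started and post fresh receive buffers.
 */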
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		bnad_cq_cmpl_init(bnad, ccb);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;
			bnad_free_all_rxbufs(bnad, rcb);

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						mem_info->len, &dma_pa,
						GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates IRQ for Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int		err = 0;
	unsigned long	irq_flags, flags;
	u32		irq;
	irq_handler_t	irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler,
	 * which may be invoked from within request_irq() for SHARED
	 * IRQs, does not execute prematurely.
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

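/*
 * Gather the netdev's multicast addresses into mc_list, starting at
 * index 1; index 0 is reserved for the broadcast address.
 */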
1622static void
1623bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1624{
1625 int i = 1; /* Index 0 has broadcast address */
1626 struct netdev_hw_addr *mc_addr;
1627
1628 netdev_for_each_mc_addr(mc_addr, netdev) {
1629 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1630 ETH_ALEN);
1631 i++;
1632 }
1633}
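/*
 * Callers must size mc_list for netdev_mc_count() + 1 entries of
 * ETH_ALEN bytes each, since index 0 is reserved for the broadcast
 * address (see the allocation in bnad_set_rx_mode()).
 */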
1634
1635static int
1636bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1637{
1638 struct bnad_rx_ctrl *rx_ctrl =
1639 container_of(napi, struct bnad_rx_ctrl, napi);
Rasesh Mody2be67142011-08-30 15:27:39 +00001640 struct bnad *bnad = rx_ctrl->bnad;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001641 int rcvd = 0;
1642
Rasesh Mody271e8b72011-08-30 15:27:40 +00001643 rx_ctrl->rx_poll_ctr++;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001644
1645 if (!netif_carrier_ok(bnad->netdev))
1646 goto poll_exit;
1647
Rasesh Mody2be67142011-08-30 15:27:39 +00001648 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001649 if (rcvd >= budget)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001650 return rcvd;
1651
1652poll_exit:
Rasesh Mody19dbff92011-08-30 15:27:41 +00001653 napi_complete(napi);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001654
Rasesh Mody271e8b72011-08-30 15:27:40 +00001655 rx_ctrl->rx_complete++;
Rasesh Mody2be67142011-08-30 15:27:39 +00001656
1657 if (rx_ctrl->ccb)
Rasesh Mody271e8b72011-08-30 15:27:40 +00001658 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1659
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001660 return rcvd;
1661}
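/*
 * Per the NAPI contract: returning 'budget' tells the core to keep
 * polling without re-enabling the completion interrupt; returning less
 * requires napi_complete() followed by re-arming the IRQ, which is what
 * bnad_enable_rx_irq_unsafe() does above.
 */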
1662
Rasesh Mody2be67142011-08-30 15:27:39 +00001663#define BNAD_NAPI_POLL_QUOTA 64
1664static void
1665bnad_napi_init(struct bnad *bnad, u32 rx_id)
1666{
1667 struct bnad_rx_ctrl *rx_ctrl;
1668 int i;
1669
1670 /* Initialize & enable NAPI */
1671 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1672 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1673 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1674 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1675 }
1676}
1677
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001678static void
1679bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1680{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001681 struct bnad_rx_ctrl *rx_ctrl;
1682 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001683
1684 /* Initialize & enable NAPI */
1685 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1686 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
Rasesh Modybe7fa322010-12-23 21:45:01 +00001687
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001688 napi_enable(&rx_ctrl->napi);
1689 }
1690}
1691
1692static void
1693bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1694{
1695 int i;
1696
1697 /* First disable and then clean up */
1698 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1699 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1700 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1701 }
1702}
1703
1704/* Should be called with conf_lock held */
1705void
Rasesh Mody078086f2011-08-08 16:21:39 +00001706bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001707{
1708 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1709 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1710 unsigned long flags;
1711
1712 if (!tx_info->tx)
1713 return;
1714
1715 init_completion(&bnad->bnad_completions.tx_comp);
1716 spin_lock_irqsave(&bnad->bna_lock, flags);
1717 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1718 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1719 wait_for_completion(&bnad->bnad_completions.tx_comp);
1720
1721 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1722 bnad_tx_msix_unregister(bnad, tx_info,
1723 bnad->num_txq_per_tx);
1724
Rasesh Mody2be67142011-08-30 15:27:39 +00001725 if (0 == tx_id)
1726 tasklet_kill(&bnad->tx_free_tasklet);
1727
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001728 spin_lock_irqsave(&bnad->bna_lock, flags);
1729 bna_tx_destroy(tx_info->tx);
1730 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1731
1732 tx_info->tx = NULL;
Rasesh Mody078086f2011-08-08 16:21:39 +00001733 tx_info->tx_id = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001734
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001735 bnad_tx_res_free(bnad, res_info);
1736}
1737
1738/* Should be called with conf_lock held */
1739int
Rasesh Mody078086f2011-08-08 16:21:39 +00001740bnad_setup_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001741{
1742 int err;
1743 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1744 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1745 struct bna_intr_info *intr_info =
1746 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1747 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1748 struct bna_tx_event_cbfn tx_cbfn;
1749 struct bna_tx *tx;
1750 unsigned long flags;
1751
Rasesh Mody078086f2011-08-08 16:21:39 +00001752 tx_info->tx_id = tx_id;
1753
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001754 /* Initialize the Tx object configuration */
1755 tx_config->num_txq = bnad->num_txq_per_tx;
1756 tx_config->txq_depth = bnad->txq_depth;
1757 tx_config->tx_type = BNA_TX_T_REGULAR;
Rasesh Mody078086f2011-08-08 16:21:39 +00001758 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001759
1760 /* Initialize the tx event handlers */
1761 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1762 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1763 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1764 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1765 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1766
1767 /* Get BNA's resource requirement for one tx object */
1768 spin_lock_irqsave(&bnad->bna_lock, flags);
1769 bna_tx_res_req(bnad->num_txq_per_tx,
1770 bnad->txq_depth, res_info);
1771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1772
1773 /* Fill Unmap Q memory requirements */
1774 BNAD_FILL_UNMAPQ_MEM_REQ(
1775 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1776 bnad->num_txq_per_tx,
1777 BNAD_TX_UNMAPQ_DEPTH);
1778
1779 /* Allocate resources */
1780 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1781 if (err)
1782 return err;
1783
1784 /* Ask BNA to create one Tx object, supplying required resources */
1785 spin_lock_irqsave(&bnad->bna_lock, flags);
1786 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1787 tx_info);
1788 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1789 if (!tx)
1790 goto err_return;
1791 tx_info->tx = tx;
1792
1793 /* Register ISR for the Tx object */
1794 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1795 err = bnad_tx_msix_register(bnad, tx_info,
1796 tx_id, bnad->num_txq_per_tx);
1797 if (err)
1798 goto err_return;
1799 }
1800
1801 spin_lock_irqsave(&bnad->bna_lock, flags);
1802 bna_tx_enable(tx);
1803 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1804
1805 return 0;
1806
1807err_return:
1808 bnad_tx_res_free(bnad, res_info);
1809 return err;
1810}
1811
1812/* Setup the rx config for bna_rx_create */
1813/* bnad decides the configuration */
1814static void
1815bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1816{
1817 rx_config->rx_type = BNA_RX_T_REGULAR;
1818 rx_config->num_paths = bnad->num_rxp_per_rx;
Rasesh Mody078086f2011-08-08 16:21:39 +00001819 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001820
1821 if (bnad->num_rxp_per_rx > 1) {
1822 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1823 rx_config->rss_config.hash_type =
Rasesh Mody078086f2011-08-08 16:21:39 +00001824 (BFI_ENET_RSS_IPV6 |
1825 BFI_ENET_RSS_IPV6_TCP |
1826 BFI_ENET_RSS_IPV4 |
1827 BFI_ENET_RSS_IPV4_TCP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001828 rx_config->rss_config.hash_mask =
1829 bnad->num_rxp_per_rx - 1;
1830 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1831 sizeof(rx_config->rss_config.toeplitz_hash_key));
1832 } else {
1833 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1834 memset(&rx_config->rss_config, 0,
1835 sizeof(rx_config->rss_config));
1836 }
1837 rx_config->rxp_type = BNA_RXP_SLR;
1838 rx_config->q_depth = bnad->rxq_depth;
1839
1840 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1841
1842 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1843}
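/*
 * Note: BNA_RXP_SLR pairs a small-buffer and a large-buffer RxQ on each
 * Rx path (hence bnad_rxqs_per_cq == 2); small_buff_size above sizes the
 * former. The hash_mask of (num_rxp_per_rx - 1) covers every path only
 * when num_rxp_per_rx is a power of two.
 */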
1844
Rasesh Mody2be67142011-08-30 15:27:39 +00001845static void
1846bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1847{
1848 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1849 int i;
1850
1851 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1852 rx_info->rx_ctrl[i].bnad = bnad;
1853}
1854
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001855/* Called with mutex_lock(&bnad->conf_mutex) held */
1856void
Rasesh Mody078086f2011-08-08 16:21:39 +00001857bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001858{
1859 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1860 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1861 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1862 unsigned long flags;
Rasesh Mody271e8b72011-08-30 15:27:40 +00001863 int to_del = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001864
1865 if (!rx_info->rx)
1866 return;
1867
1868 if (0 == rx_id) {
1869 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001870 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1871 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001872 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001873 to_del = 1;
1874 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001875 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001876 if (to_del)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001877 del_timer_sync(&bnad->dim_timer);
1878 }
1879
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001880 init_completion(&bnad->bnad_completions.rx_comp);
1881 spin_lock_irqsave(&bnad->bna_lock, flags);
1882 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1883 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1884 wait_for_completion(&bnad->bnad_completions.rx_comp);
1885
1886 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1887 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1888
Rasesh Mody2be67142011-08-30 15:27:39 +00001889 bnad_napi_disable(bnad, rx_id);
1890
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001891 spin_lock_irqsave(&bnad->bna_lock, flags);
1892 bna_rx_destroy(rx_info->rx);
1893 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1894
1895 rx_info->rx = NULL;
Rasesh Mody3caa1e952011-08-30 15:27:42 +00001896 rx_info->rx_id = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001897
1898 bnad_rx_res_free(bnad, res_info);
1899}
1900
1901/* Called with mutex_lock(&bnad->conf_mutex) held */
1902int
Rasesh Mody078086f2011-08-08 16:21:39 +00001903bnad_setup_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001904{
1905 int err;
1906 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1907 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1908 struct bna_intr_info *intr_info =
1909 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1910 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1911 struct bna_rx_event_cbfn rx_cbfn;
1912 struct bna_rx *rx;
1913 unsigned long flags;
1914
Rasesh Mody078086f2011-08-08 16:21:39 +00001915 rx_info->rx_id = rx_id;
1916
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001917 /* Initialize the Rx object configuration */
1918 bnad_init_rx_config(bnad, rx_config);
1919
1920 /* Initialize the Rx event handlers */
1921 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
Rasesh Modybe7fa322010-12-23 21:45:01 +00001922 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001923 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1924 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1925 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1926 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1927
1928 /* Get BNA's resource requirement for one Rx object */
1929 spin_lock_irqsave(&bnad->bna_lock, flags);
1930 bna_rx_res_req(rx_config, res_info);
1931 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1932
1933 /* Fill Unmap Q memory requirements */
1934 BNAD_FILL_UNMAPQ_MEM_REQ(
1935 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1936 rx_config->num_paths +
1937 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1938 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1939
1940 /* Allocate resource */
1941 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1942 if (err)
1943 return err;
1944
Rasesh Mody2be67142011-08-30 15:27:39 +00001945 bnad_rx_ctrl_init(bnad, rx_id);
1946
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001947 /* Ask BNA to create one Rx object, supplying required resources */
1948 spin_lock_irqsave(&bnad->bna_lock, flags);
1949 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1950 rx_info);
1951 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody3caa1e952011-08-30 15:27:42 +00001952 if (!rx) {
1953 err = -ENOMEM;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001954 goto err_return;
Rasesh Mody3caa1e952011-08-30 15:27:42 +00001955 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001956 rx_info->rx = rx;
1957
Rasesh Mody2be67142011-08-30 15:27:39 +00001958 /*
1959	 * Init NAPI, so that its state is set to NAPI_STATE_SCHED and
1960	 * the IRQ handler cannot schedule NAPI at this point.
1961 */
1962 bnad_napi_init(bnad, rx_id);
1963
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001964 /* Register ISR for the Rx object */
1965 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1966 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1967 rx_config->num_paths);
1968 if (err)
1969 goto err_return;
1970 }
1971
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001972 spin_lock_irqsave(&bnad->bna_lock, flags);
1973 if (0 == rx_id) {
1974 /* Set up Dynamic Interrupt Moderation Vector */
1975 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1976 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1977
1978 /* Enable VLAN filtering only on the default Rx */
1979 bna_rx_vlanfilter_enable(rx);
1980
1981 /* Start the DIM timer */
1982 bnad_dim_timer_start(bnad);
1983 }
1984
1985 bna_rx_enable(rx);
1986 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1987
Rasesh Mody2be67142011-08-30 15:27:39 +00001988 /* Enable scheduling of NAPI */
1989 bnad_napi_enable(bnad, rx_id);
1990
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001991 return 0;
1992
1993err_return:
1994 bnad_cleanup_rx(bnad, rx_id);
1995 return err;
1996}
1997
1998/* Called with conf_lock & bnad->bna_lock held */
1999void
2000bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2001{
2002 struct bnad_tx_info *tx_info;
2003
2004 tx_info = &bnad->tx_info[0];
2005 if (!tx_info->tx)
2006 return;
2007
2008 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2009}
2010
2011/* Called with conf_lock & bnad->bna_lock held */
2012void
2013bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2014{
2015 struct bnad_rx_info *rx_info;
Rasesh Mody0120b992011-07-22 08:07:41 +00002016 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002017
2018 for (i = 0; i < bnad->num_rx; i++) {
2019 rx_info = &bnad->rx_info[i];
2020 if (!rx_info->rx)
2021 continue;
2022 bna_rx_coalescing_timeo_set(rx_info->rx,
2023 bnad->rx_coalescing_timeo);
2024 }
2025}
2026
2027/*
2028 * Called with bnad->bna_lock held
2029 */
Rasesh Modya2122d92011-08-30 15:27:43 +00002030int
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002031bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2032{
2033 int ret;
2034
2035 if (!is_valid_ether_addr(mac_addr))
2036 return -EADDRNOTAVAIL;
2037
2038 /* If datapath is down, pretend everything went through */
2039 if (!bnad->rx_info[0].rx)
2040 return 0;
2041
2042 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2043 if (ret != BNA_CB_SUCCESS)
2044 return -EADDRNOTAVAIL;
2045
2046 return 0;
2047}
2048
2049/* Should be called with conf_lock held */
Rasesh Modya2122d92011-08-30 15:27:43 +00002050int
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002051bnad_enable_default_bcast(struct bnad *bnad)
2052{
2053 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2054 int ret;
2055 unsigned long flags;
2056
2057 init_completion(&bnad->bnad_completions.mcast_comp);
2058
2059 spin_lock_irqsave(&bnad->bna_lock, flags);
2060 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2061 bnad_cb_rx_mcast_add);
2062 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2063
2064 if (ret == BNA_CB_SUCCESS)
2065 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2066 else
2067 return -ENODEV;
2068
2069 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2070 return -ENODEV;
2071
2072 return 0;
2073}
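/*
 * The function above shows the completion pattern used throughout this
 * file for asynchronous BNA operations: init_completion(), issue the
 * request under bna_lock with a callback, drop the lock, then
 * wait_for_completion() and check the status the callback recorded.
 */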
2074
Rasesh Mody19dbff92011-08-30 15:27:41 +00002075/* Called with mutex_lock(&bnad->conf_mutex) held */
Rasesh Modya2122d92011-08-30 15:27:43 +00002076void
Rasesh Modyaad75b62010-12-23 21:45:08 +00002077bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2078{
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002079 u16 vid;
Rasesh Modyaad75b62010-12-23 21:45:08 +00002080 unsigned long flags;
2081
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002082 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
Rasesh Modyaad75b62010-12-23 21:45:08 +00002083 spin_lock_irqsave(&bnad->bna_lock, flags);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002084 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
Rasesh Modyaad75b62010-12-23 21:45:08 +00002085 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2086 }
2087}
2088
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002089/* Statistics utilities */
2090void
Eric Dumazet250e0612010-09-02 12:45:02 -07002091bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002092{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002093 int i, j;
2094
2095 for (i = 0; i < bnad->num_rx; i++) {
2096 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2097 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002098 stats->rx_packets += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002099 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002100 stats->rx_bytes += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002101 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2102 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2103 bnad->rx_info[i].rx_ctrl[j].ccb->
2104 rcb[1]->rxq) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002105 stats->rx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002106 bnad->rx_info[i].rx_ctrl[j].
2107 ccb->rcb[1]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002108 stats->rx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002109 bnad->rx_info[i].rx_ctrl[j].
2110 ccb->rcb[1]->rxq->rx_bytes;
2111 }
2112 }
2113 }
2114 }
2115 for (i = 0; i < bnad->num_tx; i++) {
2116 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2117 if (bnad->tx_info[i].tcb[j]) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002118 stats->tx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002119 bnad->tx_info[i].tcb[j]->txq->tx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002120 stats->tx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002121 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2122 }
2123 }
2124 }
2125}
2126
2127/*
2128 * Must be called with the bna_lock held.
2129 */
2130void
Eric Dumazet250e0612010-09-02 12:45:02 -07002131bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002132{
Rasesh Mody078086f2011-08-08 16:21:39 +00002133 struct bfi_enet_stats_mac *mac_stats;
2134 u32 bmap;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002135 int i;
2136
Rasesh Mody078086f2011-08-08 16:21:39 +00002137 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
Eric Dumazet250e0612010-09-02 12:45:02 -07002138 stats->rx_errors =
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002139 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2140 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2141 mac_stats->rx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002142 stats->tx_errors = mac_stats->tx_fcs_error +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002143 mac_stats->tx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002144 stats->rx_dropped = mac_stats->rx_drop;
2145 stats->tx_dropped = mac_stats->tx_drop;
2146 stats->multicast = mac_stats->rx_multicast;
2147 stats->collisions = mac_stats->tx_total_collision;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002148
Eric Dumazet250e0612010-09-02 12:45:02 -07002149 stats->rx_length_errors = mac_stats->rx_frame_length_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002150
2151 /* receive ring buffer overflow ?? */
2152
Eric Dumazet250e0612010-09-02 12:45:02 -07002153 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2154 stats->rx_frame_errors = mac_stats->rx_alignment_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002155	/* receiver fifo overrun */
Rasesh Mody078086f2011-08-08 16:21:39 +00002156 bmap = bna_rx_rid_mask(&bnad->bna);
2157 for (i = 0; bmap; i++) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002158 if (bmap & 1) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002159 stats->rx_fifo_errors +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002160 bnad->stats.bna_stats->
Rasesh Mody078086f2011-08-08 16:21:39 +00002161 hw_stats.rxf_stats[i].frame_drops;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002162 break;
2163 }
2164 bmap >>= 1;
2165 }
2166}
2167
2168static void
2169bnad_mbox_irq_sync(struct bnad *bnad)
2170{
2171 u32 irq;
2172 unsigned long flags;
2173
2174 spin_lock_irqsave(&bnad->bna_lock, flags);
2175 if (bnad->cfg_flags & BNAD_CF_MSIX)
Rasesh Mody8811e262011-07-22 08:07:44 +00002176 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002177 else
2178 irq = bnad->pcidev->irq;
2179 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2180
2181 synchronize_irq(irq);
2182}
2183
2184/* Utility used by bnad_start_xmit to prepare a skb for TSO */
2185static int
2186bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2187{
2188 int err;
2189
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002190 if (skb_header_cloned(skb)) {
2191 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2192 if (err) {
2193 BNAD_UPDATE_CTR(bnad, tso_err);
2194 return err;
2195 }
2196 }
2197
2198 /*
2199 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2200 * excluding the length field.
2201 */
2202 if (skb->protocol == htons(ETH_P_IP)) {
2203 struct iphdr *iph = ip_hdr(skb);
2204
2205		/* Cleared here so per-segment values can be filled in during segmentation */
2206 iph->tot_len = 0;
2207 iph->check = 0;
2208
2209 tcp_hdr(skb)->check =
2210 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2211 IPPROTO_TCP, 0);
2212 BNAD_UPDATE_CTR(bnad, tso4);
2213 } else {
2214 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2215
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002216 ipv6h->payload_len = 0;
2217 tcp_hdr(skb)->check =
2218 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2219 IPPROTO_TCP, 0);
2220 BNAD_UPDATE_CTR(bnad, tso6);
2221 }
2222
2223 return 0;
2224}
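/*
 * A worked example of the seeding above for IPv4: the TCP checksum is
 * set to ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0), i.e. the
 * pseudo-header sum with a zero length; the per-segment length and
 * payload sum are then expected to be added when the super-frame is cut
 * into MTU-sized segments.
 */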
2225
2226/*
2227 * Initialize Q numbers depending on Rx Paths
2228 * Called with bnad->bna_lock held, because of cfg_flags
2229 * access.
2230 */
2231static void
2232bnad_q_num_init(struct bnad *bnad)
2233{
2234 int rxps;
2235
2236 rxps = min((uint)num_online_cpus(),
Rasesh Mody772b5232011-08-30 15:27:37 +00002237 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002238
2239 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2240 rxps = 1; /* INTx */
2241
2242 bnad->num_rx = 1;
2243 bnad->num_tx = 1;
2244 bnad->num_rxp_per_rx = rxps;
2245 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2246}
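/*
 * E.g. on an 8-CPU host in MSI-X mode this yields one Tx object with
 * BNAD_TXQ_NUM TxQs and one Rx object with 8 Rx paths (assuming
 * BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX >= 8); in INTx mode everything
 * collapses to a single Rx path.
 */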
2247
2248/*
2249 * Adjusts the Q numbers, given a number of MSI-X vectors.
2250 * Gives preference to RSS as opposed to Tx priority queues;
2251 * in such a case, just use 1 Tx Q.
2252 * Called with bnad->bna_lock held because of cfg_flags access.
2253 */
2254static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002255bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002256{
2257 bnad->num_txq_per_tx = 1;
2258 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2259 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2260 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2261 bnad->num_rxp_per_rx = msix_vectors -
2262 (bnad->num_tx * bnad->num_txq_per_tx) -
2263 BNAD_MAILBOX_MSIX_VECTORS;
2264 } else
2265 bnad->num_rxp_per_rx = 1;
2266}
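/*
 * Worked example, assuming BNAD_MAILBOX_MSIX_VECTORS == 1: with
 * msix_vectors == 8, one Tx object and one TxQ, 8 >= 1 + 2 + 1 holds,
 * so num_rxp_per_rx = 8 - 1 - 1 = 6; with only 3 vectors the test
 * fails and the driver falls back to a single Rx path.
 */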
2267
Rasesh Mody078086f2011-08-08 16:21:39 +00002268/* Enable / disable ioceth */
2269static int
2270bnad_ioceth_disable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002271{
2272 unsigned long flags;
Rasesh Mody078086f2011-08-08 16:21:39 +00002273 int err = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002274
2275 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002276 init_completion(&bnad->bnad_completions.ioc_comp);
2277 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002278 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2279
Rasesh Mody078086f2011-08-08 16:21:39 +00002280 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2281 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2282
2283 err = bnad->bnad_completions.ioc_comp_status;
2284 return err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002285}
2286
2287static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002288bnad_ioceth_enable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002289{
2290 int err = 0;
2291 unsigned long flags;
2292
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002293 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002294 init_completion(&bnad->bnad_completions.ioc_comp);
2295 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2296 bna_ioceth_enable(&bnad->bna.ioceth);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002297 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2298
Rasesh Mody078086f2011-08-08 16:21:39 +00002299 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2300 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002301
Rasesh Mody078086f2011-08-08 16:21:39 +00002302 err = bnad->bnad_completions.ioc_comp_status;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002303
2304 return err;
2305}
2306
2307/* Free BNA resources */
2308static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002309bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2310 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002311{
2312 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002313
Rasesh Mody078086f2011-08-08 16:21:39 +00002314 for (i = 0; i < res_val_max; i++)
2315 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002316}
2317
2318/* Allocates memory and interrupt resources for BNA */
2319static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002320bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2321 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002322{
2323 int i, err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002324
Rasesh Mody078086f2011-08-08 16:21:39 +00002325 for (i = 0; i < res_val_max; i++) {
2326 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002327 if (err)
2328 goto err_return;
2329 }
2330 return 0;
2331
2332err_return:
Rasesh Mody078086f2011-08-08 16:21:39 +00002333 bnad_res_free(bnad, res_info, res_val_max);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002334 return err;
2335}
2336
2337/* Interrupt enable / disable */
2338static void
2339bnad_enable_msix(struct bnad *bnad)
2340{
2341 int i, ret;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002342 unsigned long flags;
2343
2344 spin_lock_irqsave(&bnad->bna_lock, flags);
2345 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2346 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2347 return;
2348 }
2349 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2350
2351 if (bnad->msix_table)
2352 return;
2353
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002354 bnad->msix_table =
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002355 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002356
2357 if (!bnad->msix_table)
2358 goto intx_mode;
2359
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002360 for (i = 0; i < bnad->msix_num; i++)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002361 bnad->msix_table[i].entry = i;
2362
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002363 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002364 if (ret > 0) {
2365 /* Not enough MSI-X vectors. */
Rasesh Mody19dbff92011-08-30 15:27:41 +00002366 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2367 ret, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002368
2369 spin_lock_irqsave(&bnad->bna_lock, flags);
2370	/* ret = number of vectors that we got */
Rasesh Mody271e8b72011-08-30 15:27:40 +00002371 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2372 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002373 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2374
Rasesh Mody271e8b72011-08-30 15:27:40 +00002375 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002376 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002377
Rasesh Mody078086f2011-08-08 16:21:39 +00002378 if (bnad->msix_num > ret)
2379 goto intx_mode;
2380
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002381 /* Try once more with adjusted numbers */
2382 /* If this fails, fall back to INTx */
2383 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002384 bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002385 if (ret)
2386 goto intx_mode;
2387
2388 } else if (ret < 0)
2389 goto intx_mode;
Rasesh Mody078086f2011-08-08 16:21:39 +00002390
2391 pci_intx(bnad->pcidev, 0);
2392
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002393 return;
2394
2395intx_mode:
Rasesh Mody19dbff92011-08-30 15:27:41 +00002396 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002397
2398 kfree(bnad->msix_table);
2399 bnad->msix_table = NULL;
2400 bnad->msix_num = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002401 spin_lock_irqsave(&bnad->bna_lock, flags);
2402 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2403 bnad_q_num_init(bnad);
2404 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2405}
2406
2407static void
2408bnad_disable_msix(struct bnad *bnad)
2409{
2410 u32 cfg_flags;
2411 unsigned long flags;
2412
2413 spin_lock_irqsave(&bnad->bna_lock, flags);
2414 cfg_flags = bnad->cfg_flags;
2415 if (bnad->cfg_flags & BNAD_CF_MSIX)
2416 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2417 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2418
2419 if (cfg_flags & BNAD_CF_MSIX) {
2420 pci_disable_msix(bnad->pcidev);
2421 kfree(bnad->msix_table);
2422 bnad->msix_table = NULL;
2423 }
2424}
2425
2426/* Netdev entry points */
2427static int
2428bnad_open(struct net_device *netdev)
2429{
2430 int err;
2431 struct bnad *bnad = netdev_priv(netdev);
2432 struct bna_pause_config pause_config;
2433 int mtu;
2434 unsigned long flags;
2435
2436 mutex_lock(&bnad->conf_mutex);
2437
2438 /* Tx */
2439 err = bnad_setup_tx(bnad, 0);
2440 if (err)
2441 goto err_return;
2442
2443 /* Rx */
2444 err = bnad_setup_rx(bnad, 0);
2445 if (err)
2446 goto cleanup_tx;
2447
2448 /* Port */
2449 pause_config.tx_pause = 0;
2450 pause_config.rx_pause = 0;
2451
Rasesh Mody078086f2011-08-08 16:21:39 +00002452 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002453
2454 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002455 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2456 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2457 bna_enet_enable(&bnad->bna.enet);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002458 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2459
2460 /* Enable broadcast */
2461 bnad_enable_default_bcast(bnad);
2462
Rasesh Modyaad75b62010-12-23 21:45:08 +00002463 /* Restore VLANs, if any */
2464 bnad_restore_vlans(bnad, 0);
2465
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002466 /* Set the UCAST address */
2467 spin_lock_irqsave(&bnad->bna_lock, flags);
2468 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2469 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2470
2471 /* Start the stats timer */
2472 bnad_stats_timer_start(bnad);
2473
2474 mutex_unlock(&bnad->conf_mutex);
2475
2476 return 0;
2477
2478cleanup_tx:
2479 bnad_cleanup_tx(bnad, 0);
2480
2481err_return:
2482 mutex_unlock(&bnad->conf_mutex);
2483 return err;
2484}
2485
2486static int
2487bnad_stop(struct net_device *netdev)
2488{
2489 struct bnad *bnad = netdev_priv(netdev);
2490 unsigned long flags;
2491
2492 mutex_lock(&bnad->conf_mutex);
2493
2494 /* Stop the stats timer */
2495 bnad_stats_timer_stop(bnad);
2496
Rasesh Mody078086f2011-08-08 16:21:39 +00002497 init_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002498
2499 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002500 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2501 bnad_cb_enet_disabled);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002502 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2503
Rasesh Mody078086f2011-08-08 16:21:39 +00002504 wait_for_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002505
2506 bnad_cleanup_tx(bnad, 0);
2507 bnad_cleanup_rx(bnad, 0);
2508
2509 /* Synchronize mailbox IRQ */
2510 bnad_mbox_irq_sync(bnad);
2511
2512 mutex_unlock(&bnad->conf_mutex);
2513
2514 return 0;
2515}
2516
2517/* TX */
2518/*
2519 * bnad_start_xmit : Netdev entry point for Transmit
2520 * Called under lock held by net_device
2521 */
2522static netdev_tx_t
2523bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2524{
2525 struct bnad *bnad = netdev_priv(netdev);
Rasesh Mody078086f2011-08-08 16:21:39 +00002526 u32 txq_id = 0;
2527 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002528
Rasesh Mody0120b992011-07-22 08:07:41 +00002529 u16 txq_prod, vlan_tag = 0;
2530 u32 unmap_prod, wis, wis_used, wi_range;
2531 u32 vectors, vect_id, i, acked;
Rasesh Mody0120b992011-07-22 08:07:41 +00002532 int err;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002533 unsigned int len;
2534 u32 gso_size;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002535
Rasesh Mody078086f2011-08-08 16:21:39 +00002536 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
Rasesh Mody0120b992011-07-22 08:07:41 +00002537 dma_addr_t dma_addr;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002538 struct bna_txq_entry *txqent;
Rasesh Mody078086f2011-08-08 16:21:39 +00002539 u16 flags;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002540
Rasesh Mody271e8b72011-08-30 15:27:40 +00002541 if (unlikely(skb->len <= ETH_HLEN)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002542 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002543 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2544 return NETDEV_TX_OK;
2545 }
2546 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2547 dev_kfree_skb(skb);
2548 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2549 return NETDEV_TX_OK;
2550 }
2551 if (unlikely(skb_headlen(skb) == 0)) {
2552 dev_kfree_skb(skb);
2553 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002554 return NETDEV_TX_OK;
2555 }
2556
Rasesh Modybe7fa322010-12-23 21:45:01 +00002557 /*
2558 * Takes care of the Tx that is scheduled between clearing the flag
Rasesh Mody19dbff92011-08-30 15:27:41 +00002559 * and the netif_tx_stop_all_queues() call.
Rasesh Modybe7fa322010-12-23 21:45:01 +00002560 */
2561 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2562 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002563 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002564 return NETDEV_TX_OK;
2565 }
2566
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002567 vectors = 1 + skb_shinfo(skb)->nr_frags;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002568 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002569 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002570 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002571 return NETDEV_TX_OK;
2572 }
2573 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2574 acked = 0;
Rasesh Mody078086f2011-08-08 16:21:39 +00002575 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2576 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002577 if ((u16) (*tcb->hw_consumer_index) !=
2578 tcb->consumer_index &&
2579 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2580 acked = bnad_free_txbufs(bnad, tcb);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002581 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2582 bna_ib_ack(tcb->i_dbell, acked);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002583 smp_mb__before_clear_bit();
2584 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2585 } else {
2586 netif_stop_queue(netdev);
2587 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2588 }
2589
2590 smp_mb();
2591 /*
2592		 * Check again to deal with the race between
2593		 * netif_stop_queue() here and netif_wake_queue() in the
2594		 * interrupt handler, which does not run under the netif tx lock.
2595 */
2596 if (likely
2597 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2598 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2599 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2600 return NETDEV_TX_BUSY;
2601 } else {
2602 netif_wake_queue(netdev);
2603 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2604 }
2605 }
2606
2607 unmap_prod = unmap_q->producer_index;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002608 flags = 0;
2609
2610 txq_prod = tcb->producer_index;
2611 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002612 txqent->hdr.wi.reserved = 0;
2613 txqent->hdr.wi.num_vectors = vectors;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002614
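	/*
	 * Build the 802.1Q TCI: PCP in bits 15:13, DEI + VID in bits 12:0;
	 * e.g. priority 5 and vid 100 encode as (5 << 13) | 100 == 0xa064.
	 * When CEE is running, the priority bits always come from the Tx
	 * queue's negotiated priority rather than from the skb's tag.
	 */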
Jesse Grosseab6d182010-10-20 13:56:03 +00002615 if (vlan_tx_tag_present(skb)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002616 vlan_tag = (u16) vlan_tx_tag_get(skb);
2617 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2618 }
2619 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2620 vlan_tag =
2621 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2622 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2623 }
2624
2625 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2626
2627 if (skb_is_gso(skb)) {
Rasesh Mody271e8b72011-08-30 15:27:40 +00002628 gso_size = skb_shinfo(skb)->gso_size;
2629
2630 if (unlikely(gso_size > netdev->mtu)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002631 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002632 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002633 return NETDEV_TX_OK;
2634 }
Rasesh Mody271e8b72011-08-30 15:27:40 +00002635 if (unlikely((gso_size + skb_transport_offset(skb) +
2636 tcp_hdrlen(skb)) >= skb->len)) {
2637 txqent->hdr.wi.opcode =
2638 __constant_htons(BNA_TXQ_WI_SEND);
2639 txqent->hdr.wi.lso_mss = 0;
2640 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2641 } else {
2642 txqent->hdr.wi.opcode =
2643 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2644 txqent->hdr.wi.lso_mss = htons(gso_size);
2645 }
2646
2647 err = bnad_tso_prepare(bnad, skb);
2648 if (unlikely(err)) {
2649 dev_kfree_skb(skb);
2650 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2651 return NETDEV_TX_OK;
2652 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002653 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2654 txqent->hdr.wi.l4_hdr_size_n_offset =
2655 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2656 (tcp_hdrlen(skb) >> 2,
2657 skb_transport_offset(skb)));
Rasesh Mody271e8b72011-08-30 15:27:40 +00002658 } else {
2659 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002660 txqent->hdr.wi.lso_mss = 0;
2661
Rasesh Mody271e8b72011-08-30 15:27:40 +00002662 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2663 dev_kfree_skb(skb);
2664 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2665 return NETDEV_TX_OK;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002666 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002667
Rasesh Mody271e8b72011-08-30 15:27:40 +00002668 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2669 u8 proto = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002670
Rasesh Mody271e8b72011-08-30 15:27:40 +00002671 if (skb->protocol == __constant_htons(ETH_P_IP))
2672 proto = ip_hdr(skb)->protocol;
2673 else if (skb->protocol ==
2674 __constant_htons(ETH_P_IPV6)) {
2675 /* nexthdr may not be TCP immediately. */
2676 proto = ipv6_hdr(skb)->nexthdr;
2677 }
2678 if (proto == IPPROTO_TCP) {
2679 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2680 txqent->hdr.wi.l4_hdr_size_n_offset =
2681 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2682 (0, skb_transport_offset(skb)));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002683
Rasesh Mody271e8b72011-08-30 15:27:40 +00002684 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002685
Rasesh Mody271e8b72011-08-30 15:27:40 +00002686 if (unlikely(skb_headlen(skb) <
2687 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2688 dev_kfree_skb(skb);
2689 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2690 return NETDEV_TX_OK;
2691 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002692
Rasesh Mody271e8b72011-08-30 15:27:40 +00002693 } else if (proto == IPPROTO_UDP) {
2694 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2695 txqent->hdr.wi.l4_hdr_size_n_offset =
2696 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2697 (0, skb_transport_offset(skb)));
2698
2699 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2700 if (unlikely(skb_headlen(skb) <
2701 skb_transport_offset(skb) +
2702 sizeof(struct udphdr))) {
2703 dev_kfree_skb(skb);
2704 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2705 return NETDEV_TX_OK;
2706 }
2707 } else {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002708 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002709 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002710 return NETDEV_TX_OK;
2711 }
Rasesh Mody271e8b72011-08-30 15:27:40 +00002712 } else {
2713 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002714 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002715 }
2716
2717 txqent->hdr.wi.flags = htons(flags);
2718
2719 txqent->hdr.wi.frame_length = htonl(skb->len);
2720
2721 unmap_q->unmap_array[unmap_prod].skb = skb;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002722 len = skb_headlen(skb);
2723 txqent->vector[0].length = htons(len);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002724 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2725 skb_headlen(skb), DMA_TO_DEVICE);
2726 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002727 dma_addr);
2728
Rasesh Mody271e8b72011-08-30 15:27:40 +00002729 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002730 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2731
Rasesh Mody271e8b72011-08-30 15:27:40 +00002732 vect_id = 0;
2733 wis_used = 1;
2734
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002735 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2736 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
Rasesh Mody078086f2011-08-08 16:21:39 +00002737 u16 size = frag->size;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002738
Rasesh Mody271e8b72011-08-30 15:27:40 +00002739 if (unlikely(size == 0)) {
2740 unmap_prod = unmap_q->producer_index;
2741
2742 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2743 unmap_q->unmap_array,
2744 unmap_prod, unmap_q->q_depth, skb,
2745 i);
2746 dev_kfree_skb(skb);
2747 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2748 return NETDEV_TX_OK;
2749 }
2750
2751 len += size;
2752
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002753 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2754 vect_id = 0;
2755 if (--wi_range)
2756 txqent++;
2757 else {
2758 BNA_QE_INDX_ADD(txq_prod, wis_used,
2759 tcb->q_depth);
2760 wis_used = 0;
2761 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2762 txqent, wi_range);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002763 }
2764 wis_used++;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002765 txqent->hdr.wi_ext.opcode =
2766 __constant_htons(BNA_TXQ_WI_EXTENSION);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002767 }
2768
2769		BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
2770 txqent->vector[vect_id].length = htons(size);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002771 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2772 frag->page_offset, size, DMA_TO_DEVICE);
2773 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002774 dma_addr);
2775 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2776 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2777 }
2778
Rasesh Mody271e8b72011-08-30 15:27:40 +00002779 if (unlikely(len != skb->len)) {
2780 unmap_prod = unmap_q->producer_index;
2781
2782 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2783 unmap_q->unmap_array, unmap_prod,
2784 unmap_q->q_depth, skb,
2785 skb_shinfo(skb)->nr_frags);
2786 dev_kfree_skb(skb);
2787 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2788 return NETDEV_TX_OK;
2789 }
2790
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002791 unmap_q->producer_index = unmap_prod;
2792 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2793 tcb->producer_index = txq_prod;
2794
2795 smp_mb();
Rasesh Modybe7fa322010-12-23 21:45:01 +00002796
2797 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2798 return NETDEV_TX_OK;
2799
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002800 bna_txq_prod_indx_doorbell(tcb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002801 smp_mb();
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002802
2803 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2804 tasklet_schedule(&bnad->tx_free_tasklet);
2805
2806 return NETDEV_TX_OK;
2807}
2808
2809/*
2810 * Uses spin_lock to synchronize reading of stats structures, which
2811 * are written by BNA under the same lock.
2812 */
Eric Dumazet250e0612010-09-02 12:45:02 -07002813static struct rtnl_link_stats64 *
2814bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002815{
2816 struct bnad *bnad = netdev_priv(netdev);
2817 unsigned long flags;
2818
2819 spin_lock_irqsave(&bnad->bna_lock, flags);
2820
Eric Dumazet250e0612010-09-02 12:45:02 -07002821 bnad_netdev_qstats_fill(bnad, stats);
2822 bnad_netdev_hwstats_fill(bnad, stats);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002823
2824 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2825
Eric Dumazet250e0612010-09-02 12:45:02 -07002826 return stats;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002827}
2828
Rasesh Modya2122d92011-08-30 15:27:43 +00002829void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002830bnad_set_rx_mode(struct net_device *netdev)
2831{
2832 struct bnad *bnad = netdev_priv(netdev);
2833 u32 new_mask, valid_mask;
2834 unsigned long flags;
2835
2836 spin_lock_irqsave(&bnad->bna_lock, flags);
2837
2838 new_mask = valid_mask = 0;
2839
2840 if (netdev->flags & IFF_PROMISC) {
2841 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2842 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2843 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2844 bnad->cfg_flags |= BNAD_CF_PROMISC;
2845 }
2846 } else {
2847 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2848 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2849 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2850 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2851 }
2852 }
2853
2854 if (netdev->flags & IFF_ALLMULTI) {
2855 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2856 new_mask |= BNA_RXMODE_ALLMULTI;
2857 valid_mask |= BNA_RXMODE_ALLMULTI;
2858 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2859 }
2860 } else {
2861 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2862 new_mask &= ~BNA_RXMODE_ALLMULTI;
2863 valid_mask |= BNA_RXMODE_ALLMULTI;
2864 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2865 }
2866 }
2867
Rasesh Mody271e8b72011-08-30 15:27:40 +00002868 if (bnad->rx_info[0].rx == NULL)
2869 goto unlock;
2870
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002871 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2872
2873 if (!netdev_mc_empty(netdev)) {
2874 u8 *mcaddr_list;
2875 int mc_count = netdev_mc_count(netdev);
2876
2877 /* Index 0 holds the broadcast address */
2878 mcaddr_list =
2879 kzalloc((mc_count + 1) * ETH_ALEN,
2880 GFP_ATOMIC);
2881 if (!mcaddr_list)
Jiri Slabyca1cef32010-09-04 02:08:41 +00002882 goto unlock;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002883
2884 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2885
2886 /* Copy rest of the MC addresses */
2887 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2888
2889 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2890 mcaddr_list, NULL);
2891
2892 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2893 kfree(mcaddr_list);
2894 }
Jiri Slabyca1cef32010-09-04 02:08:41 +00002895unlock:
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002896 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2897}
2898
2899/*
2900 * bna_lock is used to sync writes to netdev->addr
2901 * conf_lock cannot be used since this call may be made
2902 * in a non-blocking context.
2903 */
2904static int
2905bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2906{
2907 int err;
2908 struct bnad *bnad = netdev_priv(netdev);
2909 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2910 unsigned long flags;
2911
2912 spin_lock_irqsave(&bnad->bna_lock, flags);
2913
2914 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2915
2916 if (!err)
2917 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2918
2919 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2920
2921 return err;
2922}
2923
2924static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002925bnad_mtu_set(struct bnad *bnad, int mtu)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002926{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002927 unsigned long flags;
2928
Rasesh Mody078086f2011-08-08 16:21:39 +00002929 init_completion(&bnad->bnad_completions.mtu_comp);
2930
2931 spin_lock_irqsave(&bnad->bna_lock, flags);
2932 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2933 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2934
2935 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2936
2937 return bnad->bnad_completions.mtu_comp_status;
2938}
2939
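/*
 * The MTU handed to the hardware below is the full wire frame: e.g. a
 * netdev MTU of 1500 becomes 1500 + ETH_HLEN (14) + VLAN_HLEN (4) +
 * ETH_FCS_LEN (4) = 1522 bytes.
 */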
2940static int
2941bnad_change_mtu(struct net_device *netdev, int new_mtu)
2942{
2943 int err, mtu = netdev->mtu;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002944 struct bnad *bnad = netdev_priv(netdev);
2945
2946 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2947 return -EINVAL;
2948
2949 mutex_lock(&bnad->conf_mutex);
2950
2951 netdev->mtu = new_mtu;
2952
Rasesh Mody078086f2011-08-08 16:21:39 +00002953 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2954 err = bnad_mtu_set(bnad, mtu);
2955 if (err)
2956 err = -EBUSY;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002957
2958 mutex_unlock(&bnad->conf_mutex);
2959 return err;
2960}
2961
2962static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002963bnad_vlan_rx_add_vid(struct net_device *netdev,
2964 unsigned short vid)
2965{
2966 struct bnad *bnad = netdev_priv(netdev);
2967 unsigned long flags;
2968
2969 if (!bnad->rx_info[0].rx)
2970 return;
2971
2972 mutex_lock(&bnad->conf_mutex);
2973
2974 spin_lock_irqsave(&bnad->bna_lock, flags);
2975 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002976 set_bit(vid, bnad->active_vlans);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002977 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2978
2979 mutex_unlock(&bnad->conf_mutex);
2980}
2981
2982static void
2983bnad_vlan_rx_kill_vid(struct net_device *netdev,
2984 unsigned short vid)
2985{
2986 struct bnad *bnad = netdev_priv(netdev);
2987 unsigned long flags;
2988
2989 if (!bnad->rx_info[0].rx)
2990 return;
2991
2992 mutex_lock(&bnad->conf_mutex);
2993
2994 spin_lock_irqsave(&bnad->bna_lock, flags);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002995 clear_bit(vid, bnad->active_vlans);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002996 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2997 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2998
2999 mutex_unlock(&bnad->conf_mutex);
3000}
3001
3002#ifdef CONFIG_NET_POLL_CONTROLLER
3003static void
3004bnad_netpoll(struct net_device *netdev)
3005{
3006 struct bnad *bnad = netdev_priv(netdev);
3007 struct bnad_rx_info *rx_info;
3008 struct bnad_rx_ctrl *rx_ctrl;
3009 u32 curr_mask;
3010 int i, j;
3011
3012 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3013 bna_intx_disable(&bnad->bna, curr_mask);
3014 bnad_isr(bnad->pcidev->irq, netdev);
3015 bna_intx_enable(&bnad->bna, curr_mask);
3016 } else {
Rasesh Mody19dbff92011-08-30 15:27:41 +00003017 /*
3018 * Tx processing may happen in sending context, so no need
3019 * to explicitly process completions here
3020 */
3021
3022 /* Rx processing */
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003023 for (i = 0; i < bnad->num_rx; i++) {
3024 rx_info = &bnad->rx_info[i];
3025 if (!rx_info->rx)
3026 continue;
3027 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3028 rx_ctrl = &rx_info->rx_ctrl[j];
Rasesh Mody271e8b72011-08-30 15:27:40 +00003029 if (rx_ctrl->ccb)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003030 bnad_netif_rx_schedule_poll(bnad,
3031 rx_ctrl->ccb);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003032 }
3033 }
3034 }
3035}
3036#endif
3037
3038static const struct net_device_ops bnad_netdev_ops = {
3039 .ndo_open = bnad_open,
3040 .ndo_stop = bnad_stop,
3041 .ndo_start_xmit = bnad_start_xmit,
Eric Dumazet250e0612010-09-02 12:45:02 -07003042 .ndo_get_stats64 = bnad_get_stats64,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003043 .ndo_set_rx_mode = bnad_set_rx_mode,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003044 .ndo_validate_addr = eth_validate_addr,
3045 .ndo_set_mac_address = bnad_set_mac_address,
3046 .ndo_change_mtu = bnad_change_mtu,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003047 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3048 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3049#ifdef CONFIG_NET_POLL_CONTROLLER
3050 .ndo_poll_controller = bnad_netpoll
3051#endif
3052};
3053
3054static void
3055bnad_netdev_init(struct bnad *bnad, bool using_dac)
3056{
3057 struct net_device *netdev = bnad->netdev;
3058
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003059 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3060 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3061 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003062
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003063 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3064 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3065 NETIF_F_TSO | NETIF_F_TSO6;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003066
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003067 netdev->features |= netdev->hw_features |
3068 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003069
3070 if (using_dac)
3071 netdev->features |= NETIF_F_HIGHDMA;
3072
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003073 netdev->mem_start = bnad->mmio_start;
3074 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3075
3076 netdev->netdev_ops = &bnad_netdev_ops;
3077 bnad_set_ethtool_ops(netdev);
3078}
3079
3080/*
3081 * 1. Initialize the bnad structure
3082 * 2. Set up netdev pointer in pci_dev
3083 * 3. Initialize Tx free tasklet
3084 * 4. Initialize no. of TxQ & CQs & MSIX vectors
3085 */
3086static int
3087bnad_init(struct bnad *bnad,
3088 struct pci_dev *pdev, struct net_device *netdev)
3089{
3090 unsigned long flags;
3091
3092 SET_NETDEV_DEV(netdev, &pdev->dev);
3093 pci_set_drvdata(pdev, netdev);
3094
3095 bnad->netdev = netdev;
3096 bnad->pcidev = pdev;
3097 bnad->mmio_start = pci_resource_start(pdev, 0);
3098 bnad->mmio_len = pci_resource_len(pdev, 0);
3099 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3100 if (!bnad->bar0) {
3101 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3102 pci_set_drvdata(pdev, NULL);
3103 return -ENOMEM;
3104 }
3105 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3106 (unsigned long long) bnad->mmio_len);
3107
3108 spin_lock_irqsave(&bnad->bna_lock, flags);
3109 if (!bnad_msix_disable)
3110 bnad->cfg_flags = BNAD_CF_MSIX;
3111
3112 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3113
3114 bnad_q_num_init(bnad);
3115 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3116
	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}

/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}

/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) spin lock used to protect the bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}

/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
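	/*
	 * Prefer a 64-bit DMA mask and fall back to 32-bit if the host
	 * can't provide it; *using_dac tells the caller whether
	 * NETIF_F_HIGHDMA can be advertised.
	 */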
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

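	/*
	 * Make sure the adapter firmware image is available before touching
	 * the device; bnad_fwimg_mutex serializes setup of the firmware
	 * buffer (cached until module exit) across concurrent probes.
	 */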
	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev_priv(netdev)
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "netdev allocation failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	bnad_lock_init(bnad);

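	/*
	 * Hold conf_mutex for the remainder of probe; it serializes
	 * configuration changes from the OS interface (see bnad_lock_init)
	 * against this initialization and its error unwinding.
	 */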
	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *		 = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

Rasesh Mody8b230ed2010-08-23 20:24:12 -07003256 /*
3257 * Initialize bnad structure
3258 * Setup relation between pci_dev & netdev
3259 * Init Tx free tasklet
3260 */
3261 err = bnad_init(bnad, pdev, netdev);
3262 if (err)
3263 goto pci_uninit;
Rasesh Mody078086f2011-08-08 16:21:39 +00003264
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003265 /* Initialize netdev structure, set up ethtool ops */
3266 bnad_netdev_init(bnad, using_dac);
3267
Rasesh Mody815f41e2010-12-23 21:45:03 +00003268 /* Set link to down state */
3269 netif_carrier_off(netdev);
3270
	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

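	/*
	 * Initialize the IOC state-machine timers; only the iocpf timer
	 * is actually started here, the rest are (re)armed later by the
	 * IOC state machine as needed.
	 */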
	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip.
	 * If the callback comes back with an error, we bail out:
	 * this is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n", err);
		goto probe_success;
	}

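	/*
	 * If the requested TxQ/RxP counts exceed what the ASIC advertises,
	 * scale them down to the reported attributes and retry once.
	 */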
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
	    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
				  bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

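	/*
	 * Error unwind: each label below tears down exactly what was set
	 * up before the corresponding failure point, in reverse order.
	 */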
probe_uninit:
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}

static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

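	/*
	 * Teardown mirrors probe in reverse: disable the IOC and its
	 * timers, uninit bna, then release IRQs, bna resources and
	 * PCI state.
	 */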
	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}

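/*
 * The class/class_mask entries restrict the match to the Ethernet
 * (network-class) function of the CT device, so other functions sharing
 * the same vendor/device ID are left alone.
 */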
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	}, {0, }
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};

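/*
 * Module load/unload: propagate the ioc_auto_recover module parameter
 * into the shared IOC layer before registering with the PCI core, and
 * drop the cached firmware image on exit.
 */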
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);