/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

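/*
 * Usage sketch (assuming the driver builds as the "bna" module):
 *	modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 * forces INTx mode and disables IOC auto recovery. Both parameters
 * are read-only (mode 0444) once the module is loaded.
 */
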
/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

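/*
 * The mailbox uses the last MSI-X vector when MSI-X is enabled,
 * and falls back to the device's INTx line otherwise.
 */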
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))

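/*
 * Sizes a KVA unmap queue that ends in a variable-length array:
 * one struct bnad_unmap_q header (which presumably already declares
 * the first bnad_skb_unmap element) plus (_depth - 1) further
 * elements.
 */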
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl, wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		     struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		pci_unmap_single(bnad->pcidev,
				 pci_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			pci_unmap_page(bnad->pcidev,
				       pci_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u16 wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int i;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs from a tasklet that was
	 * scheduled before bnad_cb_tx_cleanup() cleared the
	 * BNAD_TXQ_TX_STARTED bit, but actually executes after
	 * the cleanup has run.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		pci_unmap_single(bnad->pcidev,
				 pci_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			pci_unmap_page(bnad->pcidev,
				       pci_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/*
 * Tx Free Tasklet function
 * Frees buffers for all the tcb's in all the Tx's.
 * Scheduled from the sending context, so that the heavyweight
 * Tx lock is not held for too long in the sending path.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
					BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

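/*
 * Reclaim completed Tx buffers outside the tasklet (interrupt or
 * poll path). BNAD_TXQ_FREE_SENT serializes this against
 * bnad_tx_free_tasklet(); the netdev queue is woken once enough
 * entries are free, and the IB is acked only while the TxQ is
 * started.
 */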
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

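/*
 * Reset the RCB and its unmap queue to an empty state; called once
 * all posted Rx buffers have been freed.
 */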
static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

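/*
 * Unmap and free every Rx buffer still posted on the RCB, then
 * reset its indices. Like the Tx variant above, this assumes the
 * queue has already been quiesced.
 */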
static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_q->unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_q->unmap_array[unmap_cons].skb = NULL;
		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
					unmap_array[unmap_cons],
					dma_addr), rcb->rxq->buffer_size,
					PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

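/*
 * Allocate an skb for every free entry in the unmap queue, DMA-map
 * it and post it to the RxQ; the doorbell is rung only while the
 * RXQ is started. Runs in atomic context, hence GFP_ATOMIC.
 */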
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
				GFP_ATOMIC);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		skb->dev = bnad->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
		unmap_array[unmap_prod].skb = skb;
		dma_addr = pci_map_single(bnad->pcidev, skb->data,
			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

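/*
 * Opportunistic refill: only one context may refill at a time
 * (guarded by the BNAD_RXQ_REFILL bit), and only once the free
 * count crosses the refill threshold.
 */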
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

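/*
 * Rx completion processing: walk valid CQ entries up to the NAPI
 * budget, unmap each buffer, drop errored frames, apply the
 * checksum-offload result, and hand packets up via GRO / VLAN
 * acceleration as appropriate. Finally ack the IB and refill the
 * RxQ(s).
 */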
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct sk_buff *skb;
	u32 flags;
	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
		return 0;

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (qid0 == cmpl->rxq_id)
			rcb = ccb->rcb[0];
		else
			rcb = ccb->rcb[1];

		unmap_q = rcb->unmap_q;

		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
		BUG_ON(!(skb));
		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
		pci_unmap_single(bnad->pcidev,
				 pci_unmap_addr(&unmap_q->
						unmap_array[unmap_q->
							    consumer_index],
						dma_addr),
				 rcb->rxq->buffer_size,
				 PCI_DMA_FROMDEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    (bnad->rx_csum &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
						ntohs(cmpl->vlan_tag), skb);
			else
				vlan_hwaccel_receive_skb(skb,
							 bnad->vlan_grp,
							 ntohs(cmpl->vlan_tag));

		} else { /* Not VLAN tagged/stripped */
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&rx_ctrl->napi, skb);
			else
				netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(ccb)) {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, packets);
		bnad_refill_rxq(bnad, ccb->rcb[0]);
		if (ccb->rcb[1])
			bnad_refill_rxq(bnad, ccb->rcb[1]);
	} else {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, 0);
	}

	return packets;
}

static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		return;

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule(napi);
	}
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status))
		return IRQ_NONE;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

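/*
 * Publish the adapter's factory MAC as the permanent address, and
 * adopt it as the current address if none has been set yet.
 */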
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

void
bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

static void
bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.port_comp);

	netif_carrier_off(bnad->netdev);
}

void
bnad_cb_port_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
		BNAD_UPDATE_CTR(bnad, cee_up);
	} else
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
			if (!tcb)
				return;
			pr_warn("bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				/* Force an immediate Transmit Schedule */
				pr_info("bna: %s TX_STARTED\n",
					bnad->netdev->name);
				netif_wake_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			} else {
				netif_stop_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			pr_warn("bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	if (tx_info != &bnad->tx_info[0])
		return;

	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
	netif_stop_queue(bnad->netdev);
	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}

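/*
 * Tx resume callback: drain any stale Tx buffers left over from the
 * stall, re-mark the TxQ as started, and wake the netdev queue if
 * the carrier is up.
 */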
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return;

	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	/*
	 * Workaround: if the first device enable failed, we may have
	 * read a zero MAC address. Try to fetch the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}

	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

	if (netif_carrier_ok(bnad->netdev)) {
		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
		netif_wake_queue(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	/* Delay only once for the whole Tx Path Shutdown */
	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad,
			struct bna_ccb *ccb)
{
	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

	if (ccb->rcb[1])
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

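/*
 * Rx post callback: reinitialize the CQ completions (only for the
 * first RCB of the CQ), drop any stale buffers, mark the RxQ
 * started and post a fresh set of receive buffers.
 */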
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);

	if (rcb == rcb->cq->ccb->rcb[0])
		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);

	bnad_free_all_rxbufs(bnad, rcb);

	set_bit(BNAD_RXQ_STARTED, &rcb->flags);

	/* Now allocate & post buffers for this RCB */
	/* !!Allocation in callback context */
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
			enum bna_cb_status status)
{
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				pci_free_consistent(bnad->pcidev,
						mem_info->mdl[i].len,
						mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

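/*
 * Allocate the memory descriptor list and its backing memory:
 * DMA-coherent buffers for BNA_MEM_T_DMA, plain kzalloc'd kernel
 * memory otherwise. On partial failure everything allocated so far
 * is released via bnad_mem_free().
 */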
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				pci_alloc_consistent(bnad->pcidev,
						mem_info->len, &dma_pa);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad,
		   struct bna_intr_info *intr_info)
{
	int irq;
	unsigned long flags;

	if (intr_info->idl == NULL)
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);

	kfree(intr_info->idl);
}

/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad,
		    struct bna_intr_info *intr_info)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	/* Mbox should use only 1 vector */

	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
	if (!intr_info->idl)
		return -ENOMEM;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
		irq_flags = 0;
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl[0].vector = bnad->msix_num - 1;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
		intr_info->intr_type = BNA_INTR_T_INTX;
		/* intr_info->idl.vector = 0 ? */
	}
	/*
	 * Use a separate irq_flags variable for request_irq(); reusing
	 * "flags" would clobber the IRQ state that irqrestore expects.
	 */
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
	}

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    uint txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			uint tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	/* Unwind the i vectors (0..i-1) that were successfully requested */
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, i);
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			uint rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	/* Unwind the i vectors (0..i-1) that were successfully requested */
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, i);
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
							ETH_ALEN);
		i++;
	}
}

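/*
 * NAPI poll handler: process up to "budget" Rx completions. If the
 * budget is exhausted, stay in polling mode; otherwise complete
 * NAPI and re-enable the Rx interrupt.
 */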
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bna_ccb *ccb;
	struct bnad *bnad;
	int rcvd = 0;

	ccb = rx_ctrl->ccb;

	bnad = ccb->bnad;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, ccb, budget);
	if (rcvd == budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	BNAD_UPDATE_CTR(bnad, netif_rx_complete);

	bnad_enable_rx_irq(bnad, ccb);
	return rcvd;
}

static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, 64);

		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}

/* Should be called with conf_lock held */
void
bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, uint tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	struct bna_tx *tx;
	unsigned long flags;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx) {
		err = -ENOMEM;
		goto err_return;
	}
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_RSS_T_V4_TCP |
				 BFI_RSS_T_V6_TCP |
				 BFI_RSS_T_V4_IP  |
				 BFI_RSS_T_V6_IP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int dim_timer_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		dim_timer_del = bnad_dim_timer_running(bnad);
		if (dim_timer_del)
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (dim_timer_del)
			del_timer_sync(&bnad->dim_timer);
	}

	bnad_napi_disable(bnad, rx_id);

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	rx_info->rx = NULL;

	bnad_rx_res_free(bnad, res_info);
}

1814/* Called with mutex_lock(&bnad->conf_mutex) held */
1815int
1816bnad_setup_rx(struct bnad *bnad, uint rx_id)
1817{
1818 int err;
1819 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1820 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1821 struct bna_intr_info *intr_info =
1822 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1823 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1824 struct bna_rx_event_cbfn rx_cbfn;
1825 struct bna_rx *rx;
1826 unsigned long flags;
1827
1828 /* Initialize the Rx object configuration */
1829 bnad_init_rx_config(bnad, rx_config);
1830
1831 /* Initialize the Rx event handlers */
1832 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1833	/* No rcb destroy callback is registered */
1834	rx_cbfn.rcb_destroy_cbfn = NULL;
1835 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1836 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1837 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1838 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1839
1840 /* Get BNA's resource requirement for one Rx object */
1841 spin_lock_irqsave(&bnad->bna_lock, flags);
1842 bna_rx_res_req(rx_config, res_info);
1843 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1844
1845 /* Fill Unmap Q memory requirements */
1846 BNAD_FILL_UNMAPQ_MEM_REQ(
1847 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1848 rx_config->num_paths +
1849 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1850 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
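	/*
	 * Unmap Q count above: one unmap Q per large-buffer RxQ
	 * (num_paths), plus one per small-buffer RxQ unless the path type
	 * is BNA_RXP_SINGLE.  bnad_init_rx_config() picked BNA_RXP_SLR,
	 * so this evaluates to 2 * num_paths here.
	 */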
1851
1852 /* Allocate resource */
1853 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1854 if (err)
1855 return err;
1856
1857 /* Ask BNA to create one Rx object, supplying required resources */
1858 spin_lock_irqsave(&bnad->bna_lock, flags);
1859 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1860 rx_info);
1861 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1862 if (!rx)
1863 goto err_return;
1864 rx_info->rx = rx;
1865
1866 /* Register ISR for the Rx object */
1867 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1868 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1869 rx_config->num_paths);
1870 if (err)
1871 goto err_return;
1872 }
1873
1874 /* Enable NAPI */
1875 bnad_napi_enable(bnad, rx_id);
1876
1877 spin_lock_irqsave(&bnad->bna_lock, flags);
1878	if (rx_id == 0) {
1879 /* Set up Dynamic Interrupt Moderation Vector */
1880 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1881 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1882
1883 /* Enable VLAN filtering only on the default Rx */
1884 bna_rx_vlanfilter_enable(rx);
1885
1886 /* Start the DIM timer */
1887 bnad_dim_timer_start(bnad);
1888 }
1889
1890 bna_rx_enable(rx);
1891 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1892
1893 return 0;
1894
1895err_return:
1896 bnad_cleanup_rx(bnad, rx_id);
1897 return err;
1898}
1899
1900/* Called with conf_lock & bnad->bna_lock held */
1901void
1902bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1903{
1904 struct bnad_tx_info *tx_info;
1905
1906 tx_info = &bnad->tx_info[0];
1907 if (!tx_info->tx)
1908 return;
1909
1910 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1911}
1912
1913/* Called with conf_lock & bnad->bna_lock held */
1914void
1915bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1916{
1917 struct bnad_rx_info *rx_info;
1918 int i;
1919
1920 for (i = 0; i < bnad->num_rx; i++) {
1921 rx_info = &bnad->rx_info[i];
1922 if (!rx_info->rx)
1923 continue;
1924 bna_rx_coalescing_timeo_set(rx_info->rx,
1925 bnad->rx_coalescing_timeo);
1926 }
1927}
1928
1929/*
1930 * Called with bnad->bna_lock held
1931 */
1932static int
1933bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1934{
1935 int ret;
1936
1937 if (!is_valid_ether_addr(mac_addr))
1938 return -EADDRNOTAVAIL;
1939
1940 /* If datapath is down, pretend everything went through */
1941 if (!bnad->rx_info[0].rx)
1942 return 0;
1943
1944 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1945 if (ret != BNA_CB_SUCCESS)
1946 return -EADDRNOTAVAIL;
1947
1948 return 0;
1949}
1950
1951/* Should be called with conf_lock held */
1952static int
1953bnad_enable_default_bcast(struct bnad *bnad)
1954{
1955 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1956 int ret;
1957 unsigned long flags;
1958
1959 init_completion(&bnad->bnad_completions.mcast_comp);
1960
1961 spin_lock_irqsave(&bnad->bna_lock, flags);
1962 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1963 bnad_cb_rx_mcast_add);
1964 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1965
1966 if (ret == BNA_CB_SUCCESS)
1967 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1968 else
1969 return -ENODEV;
1970
1971 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1972 return -ENODEV;
1973
1974 return 0;
1975}
1976
1977/* Called with bnad_conf_lock() held */
1978static void
1979bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1980{
1981 u16 vlan_id;
1982 unsigned long flags;
1983
1984 if (!bnad->vlan_grp)
1985 return;
1986
1987 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1988
1989 for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
1990 if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
1991 continue;
1992 spin_lock_irqsave(&bnad->bna_lock, flags);
1993 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
1994 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1995 }
1996}
1997
1998/* Statistics utilities */
1999void
2000bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2001{
2002	int i, j;
2003
2004 for (i = 0; i < bnad->num_rx; i++) {
2005 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2006 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2007				stats->rx_packets += bnad->rx_info[i].
2008					rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2009				stats->rx_bytes += bnad->rx_info[i].
2010					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2011 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2012 bnad->rx_info[i].rx_ctrl[j].ccb->
2013 rcb[1]->rxq) {
2014					stats->rx_packets +=
2015						bnad->rx_info[i].rx_ctrl[j].
2016						ccb->rcb[1]->rxq->rx_packets;
2017					stats->rx_bytes +=
2018						bnad->rx_info[i].rx_ctrl[j].
2019 ccb->rcb[1]->rxq->rx_bytes;
2020 }
2021 }
2022 }
2023 }
2024 for (i = 0; i < bnad->num_tx; i++) {
2025 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2026 if (bnad->tx_info[i].tcb[j]) {
2027				stats->tx_packets +=
2028					bnad->tx_info[i].tcb[j]->txq->tx_packets;
2029				stats->tx_bytes +=
2030					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2031 }
2032 }
2033 }
2034}
2035
2036/*
2037 * Must be called with the bna_lock held.
2038 */
2039void
2040bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2041{
2042	struct bfi_ll_stats_mac *mac_stats;
2043	u64 bmap;
2044 int i;
2045
2046 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2047	stats->rx_errors =
2048		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2049 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2050 mac_stats->rx_undersize;
2051	stats->tx_errors = mac_stats->tx_fcs_error +
2052					mac_stats->tx_undersize;
2053	stats->rx_dropped = mac_stats->rx_drop;
2054 stats->tx_dropped = mac_stats->tx_drop;
2055 stats->multicast = mac_stats->rx_multicast;
2056 stats->collisions = mac_stats->tx_total_collision;
2057
2058	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2059
2060	/* TODO: account for receive ring buffer overflow, if a counter exists */
2061
2062	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2063	stats->rx_frame_errors = mac_stats->rx_alignment_error;
2064	/* receiver fifo overrun */
2065 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2066 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2067 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2068 if (bmap & 1) {
2069			stats->rx_fifo_errors +=
2070				bnad->stats.bna_stats->
2071 hw_stats->rxf_stats[i].frame_drops;
2072 break;
2073 }
2074 bmap >>= 1;
2075 }
2076}
2077
2078static void
2079bnad_mbox_irq_sync(struct bnad *bnad)
2080{
2081 u32 irq;
2082 unsigned long flags;
2083
2084 spin_lock_irqsave(&bnad->bna_lock, flags);
2085 if (bnad->cfg_flags & BNAD_CF_MSIX)
2086 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2087 else
2088 irq = bnad->pcidev->irq;
2089 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2090
2091 synchronize_irq(irq);
2092}
2093
2094/* Utility used by bnad_start_xmit, for doing TSO */
2095static int
2096bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2097{
2098 int err;
2099
2100	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2101 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2102 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2103 if (skb_header_cloned(skb)) {
2104 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2105 if (err) {
2106 BNAD_UPDATE_CTR(bnad, tso_err);
2107 return err;
2108 }
2109 }
2110
2111 /*
2112 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2113 * excluding the length field.
2114 */
2115 if (skb->protocol == htons(ETH_P_IP)) {
2116 struct iphdr *iph = ip_hdr(skb);
2117
2118		/* tot_len and check are recomputed for each segment by the hardware */
2119 iph->tot_len = 0;
2120 iph->check = 0;
2121
2122 tcp_hdr(skb)->check =
2123 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2124 IPPROTO_TCP, 0);
2125 BNAD_UPDATE_CTR(bnad, tso4);
2126 } else {
2127 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2128
2129 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2130 ipv6h->payload_len = 0;
2131 tcp_hdr(skb)->check =
2132 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2133 IPPROTO_TCP, 0);
2134 BNAD_UPDATE_CTR(bnad, tso6);
2135 }
2136
2137 return 0;
2138}
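
/*
 * Worked example of the seeding done in bnad_tso_prepare(): for LSO the
 * TCP checksum field must hold the one's complement pseudo-header sum
 * computed with a zero length, so the device can fold in each segment's
 * real length itself.  A minimal IPv4-only sketch, using the same
 * helpers the driver already relies on (illustrative, never called):
 */
static inline void bnad_tso_seed_sketch(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = 0;	/* rewritten per segment by the device */
	iph->check = 0;
	tcp_hdr(skb)->check =
		~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);
}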
2139
2140/*
2141 * Initialize Q numbers depending on Rx Paths
2142 * Called with bnad->bna_lock held, because of cfg_flags
2143 * access.
2144 */
2145static void
2146bnad_q_num_init(struct bnad *bnad)
2147{
2148 int rxps;
2149
2150 rxps = min((uint)num_online_cpus(),
2151 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2152
2153 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2154 rxps = 1; /* INTx */
2155
2156 bnad->num_rx = 1;
2157 bnad->num_tx = 1;
2158 bnad->num_rxp_per_rx = rxps;
2159 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2160}
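
/*
 * Example: MSI-X on an 8-CPU host (limits permitting) gives rxps = 8,
 * i.e. one Rx object with 8 paths and one Tx object with BNAD_TXQ_NUM
 * TxQs.  With INTx there is a single vector, so everything collapses
 * onto one Rx path.
 */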
2161
2162/*
2163 * Adjusts the Q numbers, given a number of MSI-X vectors.
2164 * Preference is given to RSS over Tx priority queues; in that
2165 * case just one TxQ is used.
2166 * Called with bnad->bna_lock held because of cfg_flags access.
2167 */
2168static void
2169bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2170{
2171 bnad->num_txq_per_tx = 1;
2172 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2173 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2174 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2175 bnad->num_rxp_per_rx = msix_vectors -
2176 (bnad->num_tx * bnad->num_txq_per_tx) -
2177 BNAD_MAILBOX_MSIX_VECTORS;
2178 } else
2179 bnad->num_rxp_per_rx = 1;
2180}
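
/*
 * Worked example for the adjustment above, assuming
 * BNAD_MAILBOX_MSIX_VECTORS == 1: with 16 vectors granted, num_tx == 1
 * and num_txq_per_tx == 1, num_rxp_per_rx becomes 16 - 1 - 1 = 14.
 * With fewer than TxQs + bnad_rxqs_per_cq + mailbox vectors available,
 * RSS is given up and a single Rx path is used.
 */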
2181
2182/* Enable / disable device */
2183static void
2184bnad_device_disable(struct bnad *bnad)
2185{
2186 unsigned long flags;
2187
2188 init_completion(&bnad->bnad_completions.ioc_comp);
2189
2190 spin_lock_irqsave(&bnad->bna_lock, flags);
2191 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2192 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2193
2194 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2195}
2196
2197static int
2198bnad_device_enable(struct bnad *bnad)
2199{
2200 int err = 0;
2201 unsigned long flags;
2202
2203 init_completion(&bnad->bnad_completions.ioc_comp);
2204
2205 spin_lock_irqsave(&bnad->bna_lock, flags);
2206 bna_device_enable(&bnad->bna.device);
2207 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2208
2209 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2210
2211 if (bnad->bnad_completions.ioc_comp_status)
2212 err = bnad->bnad_completions.ioc_comp_status;
2213
2214 return err;
2215}
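
/*
 * bnad_device_enable()/bnad_device_disable() show the driver's standard
 * recipe for making an asynchronous BNA mailbox operation synchronous:
 * arm a completion, post the request under bna_lock, then sleep until
 * the BNA callback runs complete().  A condensed sketch of the pattern;
 * bnad_post_fn_t and bnad_sync_mbox_sketch() are illustrative only:
 */
typedef void (*bnad_post_fn_t)(struct bnad *bnad);

static inline int
bnad_sync_mbox_sketch(struct bnad *bnad, struct completion *done,
		      bnad_post_fn_t post)
{
	unsigned long flags;

	init_completion(done);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	post(bnad);			/* kicks off the async operation */
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(done);	/* BNA callback calls complete() */
	return bnad->bnad_completions.ioc_comp_status;
}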
2216
2217/* Free BNA resources */
2218static void
2219bnad_res_free(struct bnad *bnad)
2220{
2221 int i;
2222 struct bna_res_info *res_info = &bnad->res_info[0];
2223
2224 for (i = 0; i < BNA_RES_T_MAX; i++) {
2225 if (res_info[i].res_type == BNA_RES_T_MEM)
2226 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2227 else
2228 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2229 }
2230}
2231
2232/* Allocates memory and interrupt resources for BNA */
2233static int
2234bnad_res_alloc(struct bnad *bnad)
2235{
2236 int i, err;
2237 struct bna_res_info *res_info = &bnad->res_info[0];
2238
2239 for (i = 0; i < BNA_RES_T_MAX; i++) {
2240 if (res_info[i].res_type == BNA_RES_T_MEM)
2241 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2242 else
2243 err = bnad_mbox_irq_alloc(bnad,
2244 &res_info[i].res_u.intr_info);
2245 if (err)
2246 goto err_return;
2247 }
2248 return 0;
2249
2250err_return:
2251 bnad_res_free(bnad);
2252 return err;
2253}
2254
2255/* Interrupt enable / disable */
2256static void
2257bnad_enable_msix(struct bnad *bnad)
2258{
2259 int i, ret;
2260	unsigned long flags;
2261
2262 spin_lock_irqsave(&bnad->bna_lock, flags);
2263 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2264 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2265 return;
2266 }
2267 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2268
2269 if (bnad->msix_table)
2270 return;
2271
2272	bnad->msix_table =
2273		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2274
2275 if (!bnad->msix_table)
2276 goto intx_mode;
2277
2278	for (i = 0; i < bnad->msix_num; i++)
2279		bnad->msix_table[i].entry = i;
2280
2281	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2282	if (ret > 0) {
2283 /* Not enough MSI-X vectors. */
2284
2285 spin_lock_irqsave(&bnad->bna_lock, flags);
2286 /* ret = #of vectors that we got */
2287 bnad_q_num_adjust(bnad, ret);
2288 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2289
2290 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2291 + (bnad->num_rx
2292 * bnad->num_rxp_per_rx) +
2293 BNAD_MAILBOX_MSIX_VECTORS;
2294
2295		/* Try once more with adjusted numbers */
2296		/* If this fails, fall back to INTx */
2297		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2298				      bnad->msix_num);
2299		if (ret)
2300 goto intx_mode;
2301
2302 } else if (ret < 0)
2303 goto intx_mode;
2304 return;
2305
2306intx_mode:
2307
2308 kfree(bnad->msix_table);
2309 bnad->msix_table = NULL;
2310 bnad->msix_num = 0;
2311	spin_lock_irqsave(&bnad->bna_lock, flags);
2312 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2313 bnad_q_num_init(bnad);
2314 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2315}
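
/*
 * The retry above relies on old-style pci_enable_msix() semantics:
 * 0 means every requested vector was allocated, a positive return is
 * the number of vectors the platform could actually provide (so the
 * queue set is shrunk and the call retried once), and a negative
 * return is a hard failure that drops the driver back to INTx.
 */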
2316
2317static void
2318bnad_disable_msix(struct bnad *bnad)
2319{
2320 u32 cfg_flags;
2321 unsigned long flags;
2322
2323 spin_lock_irqsave(&bnad->bna_lock, flags);
2324 cfg_flags = bnad->cfg_flags;
2325 if (bnad->cfg_flags & BNAD_CF_MSIX)
2326 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2327 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2328
2329 if (cfg_flags & BNAD_CF_MSIX) {
2330 pci_disable_msix(bnad->pcidev);
2331 kfree(bnad->msix_table);
2332 bnad->msix_table = NULL;
2333 }
2334}
2335
2336/* Netdev entry points */
2337static int
2338bnad_open(struct net_device *netdev)
2339{
2340 int err;
2341 struct bnad *bnad = netdev_priv(netdev);
2342 struct bna_pause_config pause_config;
2343 int mtu;
2344 unsigned long flags;
2345
2346 mutex_lock(&bnad->conf_mutex);
2347
2348 /* Tx */
2349 err = bnad_setup_tx(bnad, 0);
2350 if (err)
2351 goto err_return;
2352
2353 /* Rx */
2354 err = bnad_setup_rx(bnad, 0);
2355 if (err)
2356 goto cleanup_tx;
2357
2358 /* Port */
2359 pause_config.tx_pause = 0;
2360 pause_config.rx_pause = 0;
2361
2362 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2363
2364 spin_lock_irqsave(&bnad->bna_lock, flags);
2365 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2366 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2367 bna_port_enable(&bnad->bna.port);
2368 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2369
2370 /* Enable broadcast */
2371 bnad_enable_default_bcast(bnad);
2372
Rasesh Modyaad75b62010-12-23 21:45:08 +00002373 /* Restore VLANs, if any */
2374 bnad_restore_vlans(bnad, 0);
2375
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002376 /* Set the UCAST address */
2377 spin_lock_irqsave(&bnad->bna_lock, flags);
2378 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2379 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2380
2381 /* Start the stats timer */
2382 bnad_stats_timer_start(bnad);
2383
2384 mutex_unlock(&bnad->conf_mutex);
2385
2386 return 0;
2387
2388cleanup_tx:
2389 bnad_cleanup_tx(bnad, 0);
2390
2391err_return:
2392 mutex_unlock(&bnad->conf_mutex);
2393 return err;
2394}
2395
2396static int
2397bnad_stop(struct net_device *netdev)
2398{
2399 struct bnad *bnad = netdev_priv(netdev);
2400 unsigned long flags;
2401
2402 mutex_lock(&bnad->conf_mutex);
2403
2404 /* Stop the stats timer */
2405 bnad_stats_timer_stop(bnad);
2406
2407 init_completion(&bnad->bnad_completions.port_comp);
2408
2409 spin_lock_irqsave(&bnad->bna_lock, flags);
2410 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2411 bnad_cb_port_disabled);
2412 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2413
2414 wait_for_completion(&bnad->bnad_completions.port_comp);
2415
2416 bnad_cleanup_tx(bnad, 0);
2417 bnad_cleanup_rx(bnad, 0);
2418
2419 /* Synchronize mailbox IRQ */
2420 bnad_mbox_irq_sync(bnad);
2421
2422 mutex_unlock(&bnad->conf_mutex);
2423
2424 return 0;
2425}
2426
2427/* TX */
2428/*
2429 * bnad_start_xmit : Netdev entry point for Transmit
2430 * Called under lock held by net_device
2431 */
2432static netdev_tx_t
2433bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2434{
2435 struct bnad *bnad = netdev_priv(netdev);
2436
2437 u16 txq_prod, vlan_tag = 0;
2438 u32 unmap_prod, wis, wis_used, wi_range;
2439 u32 vectors, vect_id, i, acked;
2440 u32 tx_id;
2441 int err;
2442
2443 struct bnad_tx_info *tx_info;
2444 struct bna_tcb *tcb;
2445 struct bnad_unmap_q *unmap_q;
2446 dma_addr_t dma_addr;
2447 struct bna_txq_entry *txqent;
2448 bna_txq_wi_ctrl_flag_t flags;
2449
2450 if (unlikely
2451 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2452 dev_kfree_skb(skb);
2453 return NETDEV_TX_OK;
2454 }
2455
2456	tx_id = 0;
2457
2458 tx_info = &bnad->tx_info[tx_id];
2459 tcb = tx_info->tcb[tx_id];
2460 unmap_q = tcb->unmap_q;
2461
2462	/*
2463 * Takes care of the Tx that is scheduled between clearing the flag
2464 * and the netif_stop_queue() call.
2465 */
2466 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2467 dev_kfree_skb(skb);
2468 return NETDEV_TX_OK;
2469 }
2470
2471	vectors = 1 + skb_shinfo(skb)->nr_frags;
2472 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2473 dev_kfree_skb(skb);
2474 return NETDEV_TX_OK;
2475 }
2476 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2477 acked = 0;
2478 if (unlikely
2479 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2480 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2481 if ((u16) (*tcb->hw_consumer_index) !=
2482 tcb->consumer_index &&
2483 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2484 acked = bnad_free_txbufs(bnad, tcb);
2485			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2486				bna_ib_ack(tcb->i_dbell, acked);
2487			smp_mb__before_clear_bit();
2488 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2489 } else {
2490 netif_stop_queue(netdev);
2491 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2492 }
2493
2494 smp_mb();
2495 /*
2496 * Check again to deal with race condition between
2497 * netif_stop_queue here, and netif_wake_queue in
2498 * interrupt handler which is not inside netif tx lock.
2499 */
2500 if (likely
2501 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2502 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2503 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2504 return NETDEV_TX_BUSY;
2505 } else {
2506 netif_wake_queue(netdev);
2507 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2508 }
2509 }
2510
2511 unmap_prod = unmap_q->producer_index;
2512 wis_used = 1;
2513 vect_id = 0;
2514 flags = 0;
2515
2516 txq_prod = tcb->producer_index;
2517 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2518 BUG_ON(!(wi_range <= tcb->q_depth));
2519 txqent->hdr.wi.reserved = 0;
2520 txqent->hdr.wi.num_vectors = vectors;
2521 txqent->hdr.wi.opcode =
2522 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2523 BNA_TXQ_WI_SEND));
2524
2525	if (vlan_tx_tag_present(skb)) {
2526		vlan_tag = (u16) vlan_tx_tag_get(skb);
2527 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2528 }
2529 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2530 vlan_tag =
2531 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2532 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2533 }
2534
2535 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
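	/*
	 * TCI layout used above: PCP in bits 15..13, CFI in bit 12, VID in
	 * bits 11..0.  Masking with 0x1fff keeps CFI + VID while the CEE
	 * priority overwrites the PCP bits.
	 */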
2536
2537 if (skb_is_gso(skb)) {
2538 err = bnad_tso_prepare(bnad, skb);
2539 if (err) {
2540 dev_kfree_skb(skb);
2541 return NETDEV_TX_OK;
2542 }
2543		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
2544 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2545 txqent->hdr.wi.l4_hdr_size_n_offset =
2546 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2547 (tcp_hdrlen(skb) >> 2,
2548 skb_transport_offset(skb)));
2549 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2550 u8 proto = 0;
2551
2552 txqent->hdr.wi.lso_mss = 0;
2553
2554 if (skb->protocol == htons(ETH_P_IP))
2555 proto = ip_hdr(skb)->protocol;
2556 else if (skb->protocol == htons(ETH_P_IPV6)) {
2557			/* nexthdr may not be TCP if extension headers are present */
2558 proto = ipv6_hdr(skb)->nexthdr;
2559 }
2560 if (proto == IPPROTO_TCP) {
2561 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2562 txqent->hdr.wi.l4_hdr_size_n_offset =
2563 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2564 (0, skb_transport_offset(skb)));
2565
2566 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2567
2568 BUG_ON(!(skb_headlen(skb) >=
2569 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2570
2571 } else if (proto == IPPROTO_UDP) {
2572 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2573 txqent->hdr.wi.l4_hdr_size_n_offset =
2574 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2575 (0, skb_transport_offset(skb)));
2576
2577 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2578
2579 BUG_ON(!(skb_headlen(skb) >=
2580 skb_transport_offset(skb) +
2581 sizeof(struct udphdr)));
2582 } else {
2583 err = skb_checksum_help(skb);
2584 BNAD_UPDATE_CTR(bnad, csum_help);
2585 if (err) {
2586 dev_kfree_skb(skb);
2587 BNAD_UPDATE_CTR(bnad, csum_help_err);
2588 return NETDEV_TX_OK;
2589 }
2590 }
2591 } else {
2592 txqent->hdr.wi.lso_mss = 0;
2593 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2594 }
2595
2596 txqent->hdr.wi.flags = htons(flags);
2597
2598 txqent->hdr.wi.frame_length = htonl(skb->len);
2599
2600 unmap_q->unmap_array[unmap_prod].skb = skb;
2601 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2602 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2603 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
2604 PCI_DMA_TODEVICE);
2605 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2606 dma_addr);
2607
2608 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2609 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2610
2611 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2612 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2613 u32 size = frag->size;
2614
2615 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2616 vect_id = 0;
2617 if (--wi_range)
2618 txqent++;
2619 else {
2620 BNA_QE_INDX_ADD(txq_prod, wis_used,
2621 tcb->q_depth);
2622 wis_used = 0;
2623 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2624 txqent, wi_range);
2625 BUG_ON(!(wi_range <= tcb->q_depth));
2626 }
2627 wis_used++;
2628 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2629 }
2630
2631 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2632 txqent->vector[vect_id].length = htons(size);
2633 dma_addr =
2634 pci_map_page(bnad->pcidev, frag->page,
2635 frag->page_offset, size,
2636 PCI_DMA_TODEVICE);
2637 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2638 dma_addr);
2639 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2640 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2641 }
2642
2643 unmap_q->producer_index = unmap_prod;
2644 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2645 tcb->producer_index = txq_prod;
2646
2647 smp_mb();
2648
2649	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2650		return NETDEV_TX_OK;
2651
2652	bna_txq_prod_indx_doorbell(tcb);
2653
2654 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2655 tasklet_schedule(&bnad->tx_free_tasklet);
2656
2657 return NETDEV_TX_OK;
2658}
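
/*
 * The ring-full handling in bnad_start_xmit() is the classic lockless
 * stop/wake dance: stop the queue, issue a full barrier, then re-check
 * the free count, because the completion path may have freed entries
 * (and tried to wake the queue) in between.  Reduced to its skeleton,
 * with a hypothetical ring_full() predicate:
 *
 *	if (ring_full()) {
 *		netif_stop_queue(netdev);
 *		smp_mb();
 *		if (ring_full())
 *			return NETDEV_TX_BUSY;
 *		netif_wake_queue(netdev);
 *	}
 */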
2659
2660/*
2661 * bna_lock is used to synchronize reads of the stats structures,
2662 * which BNA writes under the same lock.
2663 */
2664static struct rtnl_link_stats64 *
2665bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2666{
2667	struct bnad *bnad = netdev_priv(netdev);
2668	unsigned long flags;
2669
2670	spin_lock_irqsave(&bnad->bna_lock, flags);
2671
2672	bnad_netdev_qstats_fill(bnad, stats);
2673	bnad_netdev_hwstats_fill(bnad, stats);
2674
2675	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2676
2677	return stats;
2678}
2679
2680static void
2681bnad_set_rx_mode(struct net_device *netdev)
2682{
2683 struct bnad *bnad = netdev_priv(netdev);
2684 u32 new_mask, valid_mask;
2685 unsigned long flags;
2686
2687 spin_lock_irqsave(&bnad->bna_lock, flags);
2688
2689 new_mask = valid_mask = 0;
2690
2691 if (netdev->flags & IFF_PROMISC) {
2692 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2693 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2694 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2695 bnad->cfg_flags |= BNAD_CF_PROMISC;
2696 }
2697 } else {
2698 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2699 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2700 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2701 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2702 }
2703 }
2704
2705 if (netdev->flags & IFF_ALLMULTI) {
2706 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2707 new_mask |= BNA_RXMODE_ALLMULTI;
2708 valid_mask |= BNA_RXMODE_ALLMULTI;
2709 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2710 }
2711 } else {
2712 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2713 new_mask &= ~BNA_RXMODE_ALLMULTI;
2714 valid_mask |= BNA_RXMODE_ALLMULTI;
2715 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2716 }
2717 }
2718
2719 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2720
2721 if (!netdev_mc_empty(netdev)) {
2722 u8 *mcaddr_list;
2723 int mc_count = netdev_mc_count(netdev);
2724
2725 /* Index 0 holds the broadcast address */
2726 mcaddr_list =
2727 kzalloc((mc_count + 1) * ETH_ALEN,
2728 GFP_ATOMIC);
2729 if (!mcaddr_list)
2730			goto unlock;
2731
2732 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2733
2734 /* Copy rest of the MC addresses */
2735 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2736
2737 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2738 mcaddr_list, NULL);
2739
2740 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2741 kfree(mcaddr_list);
2742 }
2743unlock:
2744	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2745}
2746
2747/*
2748 * bna_lock is used to sync writes to netdev->dev_addr
2749 * conf_lock cannot be used since this call may be made
2750 * in a non-blocking context.
2751 */
2752static int
2753bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2754{
2755 int err;
2756 struct bnad *bnad = netdev_priv(netdev);
2757 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2758 unsigned long flags;
2759
2760 spin_lock_irqsave(&bnad->bna_lock, flags);
2761
2762 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2763
2764 if (!err)
2765 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2766
2767 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2768
2769 return err;
2770}
2771
2772static int
2773bnad_change_mtu(struct net_device *netdev, int new_mtu)
2774{
2775 int mtu, err = 0;
2776 unsigned long flags;
2777
2778 struct bnad *bnad = netdev_priv(netdev);
2779
2780 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2781 return -EINVAL;
2782
2783 mutex_lock(&bnad->conf_mutex);
2784
2785 netdev->mtu = new_mtu;
2786
2787 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2788
2789 spin_lock_irqsave(&bnad->bna_lock, flags);
2790 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2791 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2792
2793 mutex_unlock(&bnad->conf_mutex);
2794 return err;
2795}
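
/*
 * Example: new_mtu = 1500 yields ETH_HLEN (14) + 1500 + ETH_FCS_LEN (4)
 * = 1518 bytes handed to bna_port_mtu_set().  The lower bound rejects
 * any MTU whose frame (sans FCS) would fall below ETH_ZLEN (60) bytes.
 */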
2796
2797static void
2798bnad_vlan_rx_register(struct net_device *netdev,
2799 struct vlan_group *vlan_grp)
2800{
2801 struct bnad *bnad = netdev_priv(netdev);
2802
2803 mutex_lock(&bnad->conf_mutex);
2804 bnad->vlan_grp = vlan_grp;
2805 mutex_unlock(&bnad->conf_mutex);
2806}
2807
2808static void
2809bnad_vlan_rx_add_vid(struct net_device *netdev,
2810 unsigned short vid)
2811{
2812 struct bnad *bnad = netdev_priv(netdev);
2813 unsigned long flags;
2814
2815 if (!bnad->rx_info[0].rx)
2816 return;
2817
2818 mutex_lock(&bnad->conf_mutex);
2819
2820 spin_lock_irqsave(&bnad->bna_lock, flags);
2821 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2822 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2823
2824 mutex_unlock(&bnad->conf_mutex);
2825}
2826
2827static void
2828bnad_vlan_rx_kill_vid(struct net_device *netdev,
2829 unsigned short vid)
2830{
2831 struct bnad *bnad = netdev_priv(netdev);
2832 unsigned long flags;
2833
2834 if (!bnad->rx_info[0].rx)
2835 return;
2836
2837 mutex_lock(&bnad->conf_mutex);
2838
2839 spin_lock_irqsave(&bnad->bna_lock, flags);
2840 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2841 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2842
2843 mutex_unlock(&bnad->conf_mutex);
2844}
2845
2846#ifdef CONFIG_NET_POLL_CONTROLLER
2847static void
2848bnad_netpoll(struct net_device *netdev)
2849{
2850 struct bnad *bnad = netdev_priv(netdev);
2851 struct bnad_rx_info *rx_info;
2852 struct bnad_rx_ctrl *rx_ctrl;
2853 u32 curr_mask;
2854 int i, j;
2855
2856 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2857 bna_intx_disable(&bnad->bna, curr_mask);
2858 bnad_isr(bnad->pcidev->irq, netdev);
2859 bna_intx_enable(&bnad->bna, curr_mask);
2860 } else {
2861 for (i = 0; i < bnad->num_rx; i++) {
2862 rx_info = &bnad->rx_info[i];
2863 if (!rx_info->rx)
2864 continue;
2865 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2866 rx_ctrl = &rx_info->rx_ctrl[j];
2867 if (rx_ctrl->ccb) {
2868 bnad_disable_rx_irq(bnad,
2869 rx_ctrl->ccb);
2870 bnad_netif_rx_schedule_poll(bnad,
2871 rx_ctrl->ccb);
2872 }
2873 }
2874 }
2875 }
2876}
2877#endif
2878
2879static const struct net_device_ops bnad_netdev_ops = {
2880 .ndo_open = bnad_open,
2881 .ndo_stop = bnad_stop,
2882 .ndo_start_xmit = bnad_start_xmit,
2883	.ndo_get_stats64	= bnad_get_stats64,
2884	.ndo_set_rx_mode	= bnad_set_rx_mode,
2885 .ndo_set_multicast_list = bnad_set_rx_mode,
2886 .ndo_validate_addr = eth_validate_addr,
2887 .ndo_set_mac_address = bnad_set_mac_address,
2888 .ndo_change_mtu = bnad_change_mtu,
2889 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2890 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2891 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2892#ifdef CONFIG_NET_POLL_CONTROLLER
2893 .ndo_poll_controller = bnad_netpoll
2894#endif
2895};
2896
2897static void
2898bnad_netdev_init(struct bnad *bnad, bool using_dac)
2899{
2900 struct net_device *netdev = bnad->netdev;
2901
2902 netdev->features |= NETIF_F_IPV6_CSUM;
2903 netdev->features |= NETIF_F_TSO;
2904 netdev->features |= NETIF_F_TSO6;
2905
2906 netdev->features |= NETIF_F_GRO;
2907 pr_warn("bna: GRO enabled, using kernel stack GRO\n");
2908
2909 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2910
2911 if (using_dac)
2912 netdev->features |= NETIF_F_HIGHDMA;
2913
2914 netdev->features |=
2915 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2916 NETIF_F_HW_VLAN_FILTER;
2917
2918 netdev->vlan_features = netdev->features;
2919 netdev->mem_start = bnad->mmio_start;
2920 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2921
2922 netdev->netdev_ops = &bnad_netdev_ops;
2923 bnad_set_ethtool_ops(netdev);
2924}
2925
2926/*
2927 * 1. Initialize the bnad structure
2928 * 2. Setup netdev pointer in pci_dev
2929 * 3. Initialize Tx free tasklet
2930 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2931 */
2932static int
2933bnad_init(struct bnad *bnad,
2934 struct pci_dev *pdev, struct net_device *netdev)
2935{
2936 unsigned long flags;
2937
2938 SET_NETDEV_DEV(netdev, &pdev->dev);
2939 pci_set_drvdata(pdev, netdev);
2940
2941 bnad->netdev = netdev;
2942 bnad->pcidev = pdev;
2943 bnad->mmio_start = pci_resource_start(pdev, 0);
2944 bnad->mmio_len = pci_resource_len(pdev, 0);
2945 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2946 if (!bnad->bar0) {
2947 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2948 pci_set_drvdata(pdev, NULL);
2949 return -ENOMEM;
2950 }
2951 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2952 (unsigned long long) bnad->mmio_len);
2953
2954 spin_lock_irqsave(&bnad->bna_lock, flags);
2955 if (!bnad_msix_disable)
2956 bnad->cfg_flags = BNAD_CF_MSIX;
2957
2958 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2959
2960 bnad_q_num_init(bnad);
2961 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2962
2963 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2964 (bnad->num_rx * bnad->num_rxp_per_rx) +
2965 BNAD_MAILBOX_MSIX_VECTORS;
2966
2967 bnad->txq_depth = BNAD_TXQ_DEPTH;
2968 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2969 bnad->rx_csum = true;
2970
2971 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2972 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2973
2974 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2975 (unsigned long)bnad);
2976
2977 return 0;
2978}
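
/*
 * Example: with the defaults above (one Tx object, one Rx object) and
 * 8 Rx paths, msix_num = BNAD_TXQ_NUM + 8 + BNAD_MAILBOX_MSIX_VECTORS;
 * assuming BNAD_TXQ_NUM == 1 and a single mailbox vector, that is 10.
 */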
2979
2980/*
2981 * Must be called after bnad_pci_uninit()
2982 * so that iounmap() and pci_set_drvdata(NULL)
2983 * happens only after PCI uninitialization.
2984 */
2985static void
2986bnad_uninit(struct bnad *bnad)
2987{
2988 if (bnad->bar0)
2989 iounmap(bnad->bar0);
2990 pci_set_drvdata(bnad->pcidev, NULL);
2991}
2992
2993/*
2994 * Initialize locks
2995 *	a) Per-device mutex used for serializing configuration
2996 *	   changes from the OS interface
2997 *	b) Spin lock used to protect the bna state machine
2998 */
2999static void
3000bnad_lock_init(struct bnad *bnad)
3001{
3002 spin_lock_init(&bnad->bna_lock);
3003 mutex_init(&bnad->conf_mutex);
3004}
3005
3006static void
3007bnad_lock_uninit(struct bnad *bnad)
3008{
3009 mutex_destroy(&bnad->conf_mutex);
3010}
3011
3012/* PCI Initialization */
3013static int
3014bnad_pci_init(struct bnad *bnad,
3015 struct pci_dev *pdev, bool *using_dac)
3016{
3017 int err;
3018
3019 err = pci_enable_device(pdev);
3020 if (err)
3021 return err;
3022 err = pci_request_regions(pdev, BNAD_NAME);
3023 if (err)
3024 goto disable_device;
3025 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3026 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3027 *using_dac = 1;
3028 } else {
3029 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3030 if (err) {
3031 err = pci_set_consistent_dma_mask(pdev,
3032 DMA_BIT_MASK(32));
3033 if (err)
3034 goto release_regions;
3035 }
3036 *using_dac = 0;
3037 }
3038 pci_set_master(pdev);
3039 return 0;
3040
3041release_regions:
3042 pci_release_regions(pdev);
3043disable_device:
3044 pci_disable_device(pdev);
3045
3046 return err;
3047}
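
/*
 * The mask negotiation above is the stock PCI DMA recipe: try 64-bit
 * streaming and coherent masks first (using_dac = 1 later enables
 * NETIF_F_HIGHDMA), otherwise fall back to 32-bit masks and let the
 * kernel bounce any high buffers.
 */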
3048
3049static void
3050bnad_pci_uninit(struct pci_dev *pdev)
3051{
3052 pci_release_regions(pdev);
3053 pci_disable_device(pdev);
3054}
3055
3056static int __devinit
3057bnad_pci_probe(struct pci_dev *pdev,
3058 const struct pci_device_id *pcidev_id)
3059{
3060	bool using_dac = false;
3061	int err;
3062 struct bnad *bnad;
3063 struct bna *bna;
3064 struct net_device *netdev;
3065 struct bfa_pcidev pcidev_info;
3066 unsigned long flags;
3067
3068 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3069 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3070
3071 mutex_lock(&bnad_fwimg_mutex);
3072 if (!cna_get_firmware_buf(pdev)) {
3073 mutex_unlock(&bnad_fwimg_mutex);
3074 pr_warn("Failed to load Firmware Image!\n");
3075 return -ENODEV;
3076 }
3077 mutex_unlock(&bnad_fwimg_mutex);
3078
3079 /*
3080	 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
3081 * bnad = netdev->priv
3082 */
3083 netdev = alloc_etherdev(sizeof(struct bnad));
3084 if (!netdev) {
3085 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3086 err = -ENOMEM;
3087 return err;
3088 }
3089 bnad = netdev_priv(netdev);
3090
3091	/*
3092	 * PCI initialization
3093	 * Output : using_dac = 1 for 64 bit DMA
3094	 *	    = 0 for 32 bit DMA
3095	 */
3096 err = bnad_pci_init(bnad, pdev, &using_dac);
3097 if (err)
3098 goto free_netdev;
3099
3100 bnad_lock_init(bnad);
3101 /*
3102 * Initialize bnad structure
3103 * Setup relation between pci_dev & netdev
3104 * Init Tx free tasklet
3105 */
3106 err = bnad_init(bnad, pdev, netdev);
3107 if (err)
3108 goto pci_uninit;
3109 /* Initialize netdev structure, set up ethtool ops */
3110 bnad_netdev_init(bnad, using_dac);
3111
3112	/* Set link to down state */
3113	netif_carrier_off(netdev);
3114
3115	bnad_enable_msix(bnad);
3116
3117 /* Get resource requirement form bna */
3118 bna_res_req(&bnad->res_info[0]);
3119
3120 /* Allocate resources from bna */
3121 err = bnad_res_alloc(bnad);
3122 if (err)
3123		goto disable_msix;
3124
3125 bna = &bnad->bna;
3126
3127 /* Setup pcidev_info for bna_init() */
3128 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3129 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3130 pcidev_info.device_id = bnad->pcidev->device;
3131 pcidev_info.pci_bar_kva = bnad->bar0;
3132
3133 mutex_lock(&bnad->conf_mutex);
3134
3135 spin_lock_irqsave(&bnad->bna_lock, flags);
3136 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3137	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3138
3139 bnad->stats.bna_stats = &bna->stats;
3140
3141 /* Set up timers */
3142 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3143 ((unsigned long)bnad));
3144 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3145 ((unsigned long)bnad));
3146	setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3147				((unsigned long)bnad));
3148	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
3149				((unsigned long)bnad));
3150
3151 /* Now start the timer before calling IOC */
3152	mod_timer(&bnad->bna.device.ioc.iocpf_timer,
3153		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3154
3155 /*
3156 * Start the chip
3157 * Don't care even if err != 0, bna state machine will
3158 * deal with it
3159 */
3160 err = bnad_device_enable(bnad);
3161
3162 /* Get the burnt-in mac */
3163 spin_lock_irqsave(&bnad->bna_lock, flags);
3164 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3165 bnad_set_netdev_perm_addr(bnad);
3166 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3167
3168 mutex_unlock(&bnad->conf_mutex);
3169
3170	/* Finally, register with the net_device layer */
3171 err = register_netdev(netdev);
3172 if (err) {
3173 pr_err("BNA : Registering with netdev failed\n");
3174 goto disable_device;
3175 }
3176
3177 return 0;
3178
3179disable_device:
3180 mutex_lock(&bnad->conf_mutex);
3181 bnad_device_disable(bnad);
3182 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3183 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3184 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3185 spin_lock_irqsave(&bnad->bna_lock, flags);
3186 bna_uninit(bna);
3187 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3188 mutex_unlock(&bnad->conf_mutex);
3189
3190 bnad_res_free(bnad);
disable_msix:
3191	bnad_disable_msix(bnad);
3192pci_uninit:
3193 bnad_pci_uninit(pdev);
3194 bnad_lock_uninit(bnad);
3195 bnad_uninit(bnad);
3196free_netdev:
3197 free_netdev(netdev);
3198 return err;
3199}
3200
3201static void __devexit
3202bnad_pci_remove(struct pci_dev *pdev)
3203{
3204 struct net_device *netdev = pci_get_drvdata(pdev);
3205 struct bnad *bnad;
3206 struct bna *bna;
3207 unsigned long flags;
3208
3209 if (!netdev)
3210 return;
3211
3212 pr_info("%s bnad_pci_remove\n", netdev->name);
3213 bnad = netdev_priv(netdev);
3214 bna = &bnad->bna;
3215
3216 unregister_netdev(netdev);
3217
3218 mutex_lock(&bnad->conf_mutex);
3219 bnad_device_disable(bnad);
3220 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3221 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3222 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3223 spin_lock_irqsave(&bnad->bna_lock, flags);
3224 bna_uninit(bna);
3225 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3226 mutex_unlock(&bnad->conf_mutex);
3227
3228 bnad_res_free(bnad);
3229 bnad_disable_msix(bnad);
3230 bnad_pci_uninit(pdev);
3231 bnad_lock_uninit(bnad);
3232 bnad_uninit(bnad);
3233 free_netdev(netdev);
3234}
3235
3236static const struct pci_device_id bnad_pci_id_table[] = {
3237	{
3238 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3239 PCI_DEVICE_ID_BROCADE_CT),
3240 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3241 .class_mask = 0xffff00
3242 }, {0, }
3243};
3244
3245MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3246
3247static struct pci_driver bnad_pci_driver = {
3248 .name = BNAD_NAME,
3249 .id_table = bnad_pci_id_table,
3250 .probe = bnad_pci_probe,
3251 .remove = __devexit_p(bnad_pci_remove),
3252};
3253
3254static int __init
3255bnad_module_init(void)
3256{
3257 int err;
3258
3259 pr_info("Brocade 10G Ethernet driver\n");
3260
3261	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3262
3263 err = pci_register_driver(&bnad_pci_driver);
3264 if (err < 0) {
3265 pr_err("bna : PCI registration failed in module init "
3266 "(%d)\n", err);
3267 return err;
3268 }
3269
3270 return 0;
3271}
3272
3273static void __exit
3274bnad_module_exit(void)
3275{
3276 pci_unregister_driver(&bnad_pci_driver);
3277
3278 if (bfi_fw)
3279 release_firmware(bfi_fw);
3280}
3281
3282module_init(bnad_module_init);
3283module_exit(bnad_module_exit);
3284
3285MODULE_AUTHOR("Brocade");
3286MODULE_LICENSE("GPL");
3287MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3288MODULE_VERSION(BNAD_VERSION);
3289MODULE_FIRMWARE(CNA_FW_FILE_CT);