/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
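/*
 * Up to two RxQs feed each CQ; bnad_poll_cq() demuxes completions
 * between ccb->rcb[0] and ccb->rcb[1] based on the completion's rxq_id.
 */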
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

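/*
 * The mailbox interrupt uses the last MSI-X vector when MSI-X is
 * enabled, otherwise the device's INTx line.
 */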
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))

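/*
 * An unmap queue is laid out as one struct bnad_unmap_q followed by
 * (_depth - 1) additional bnad_skb_unmap entries; the first entry is
 * part of the structure itself, hence the "- 1" below.
 */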
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb = NULL;
	int			i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		sent_packets = 0, sent_bytes = 0;
	u16		wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb;
	int i;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs out of a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
	 * but this routine runs actually after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked = 0;
	int			i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						  &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
					BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

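/*
 * Reclaims completed Tx buffers for one TxQ; wakes the netdev queue
 * when enough descriptors have been freed, then acks the IB doorbell.
 */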
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

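/*
 * NAPI Rx processing: walk the CQ, unmap completed buffers and hand
 * the skbs to the stack (applying checksum and VLAN offload results),
 * ack the IB with the number of packets processed, then refill the
 * RxQ(s) feeding this CQ.
 */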
433static u32
434bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
435{
436 struct bna_cq_entry *cmpl, *next_cmpl;
437 struct bna_rcb *rcb = NULL;
438 unsigned int wi_range, packets = 0, wis = 0;
439 struct bnad_unmap_q *unmap_q;
Ivan Vecera5ea74312011-02-02 04:37:02 +0000440 struct bnad_skb_unmap *unmap_array;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700441 struct sk_buff *skb;
Ivan Vecera5ea74312011-02-02 04:37:02 +0000442 u32 flags, unmap_cons;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700443 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
444 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
445
Rasesh Modybe7fa322010-12-23 21:45:01 +0000446 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
447 return 0;
448
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700449 prefetch(bnad->netdev);
450 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
451 wi_range);
452 BUG_ON(!(wi_range <= ccb->q_depth));
453 while (cmpl->valid && packets < budget) {
454 packets++;
455 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
456
457 if (qid0 == cmpl->rxq_id)
458 rcb = ccb->rcb[0];
459 else
460 rcb = ccb->rcb[1];
461
462 unmap_q = rcb->unmap_q;
Ivan Vecera5ea74312011-02-02 04:37:02 +0000463 unmap_array = unmap_q->unmap_array;
464 unmap_cons = unmap_q->consumer_index;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700465
Ivan Vecera5ea74312011-02-02 04:37:02 +0000466 skb = unmap_array[unmap_cons].skb;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700467 BUG_ON(!(skb));
Ivan Vecera5ea74312011-02-02 04:37:02 +0000468 unmap_array[unmap_cons].skb = NULL;
469 dma_unmap_single(&bnad->pcidev->dev,
470 dma_unmap_addr(&unmap_array[unmap_cons],
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700471 dma_addr),
Ivan Vecera5ea74312011-02-02 04:37:02 +0000472 rcb->rxq->buffer_size,
473 DMA_FROM_DEVICE);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700474 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
475
476 /* Should be more efficient ? Performance ? */
477 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
478
479 wis++;
480 if (likely(--wi_range))
481 next_cmpl = cmpl + 1;
482 else {
483 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
484 wis = 0;
485 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
486 next_cmpl, wi_range);
487 BUG_ON(!(wi_range <= ccb->q_depth));
488 }
489 prefetch(next_cmpl);
490
491 flags = ntohl(cmpl->flags);
492 if (unlikely
493 (flags &
494 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
495 BNA_CQ_EF_TOO_LONG))) {
496 dev_kfree_skb_any(skb);
497 rcb->rxq->rx_packets_with_error++;
498 goto next;
499 }
500
501 skb_put(skb, ntohs(cmpl->length));
502 if (likely
Michał Mirosławe5ee20e2011-04-12 09:38:23 +0000503 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700504 (((flags & BNA_CQ_EF_IPV4) &&
505 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
506 (flags & BNA_CQ_EF_IPV6)) &&
507 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
508 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
509 skb->ip_summed = CHECKSUM_UNNECESSARY;
510 else
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700511 skb_checksum_none_assert(skb);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700512
513 rcb->rxq->rx_packets++;
514 rcb->rxq->rx_bytes += skb->len;
515 skb->protocol = eth_type_trans(skb, bnad->netdev);
516
517 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
518 struct bnad_rx_ctrl *rx_ctrl =
519 (struct bnad_rx_ctrl *)ccb->ctrl;
520 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
521 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
522 ntohs(cmpl->vlan_tag), skb);
523 else
524 vlan_hwaccel_receive_skb(skb,
525 bnad->vlan_grp,
526 ntohs(cmpl->vlan_tag));
527
528 } else { /* Not VLAN tagged/stripped */
529 struct bnad_rx_ctrl *rx_ctrl =
530 (struct bnad_rx_ctrl *)ccb->ctrl;
531 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
532 napi_gro_receive(&rx_ctrl->napi, skb);
533 else
534 netif_receive_skb(skb);
535 }
536
537next:
538 cmpl->valid = 0;
539 cmpl = next_cmpl;
540 }
541
542 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
543
544 if (likely(ccb)) {
Rasesh Modybe7fa322010-12-23 21:45:01 +0000545 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
546 bna_ib_ack(ccb->i_dbell, packets);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700547 bnad_refill_rxq(bnad, ccb->rcb[0]);
548 if (ccb->rcb[1])
549 bnad_refill_rxq(bnad, ccb->rcb[1]);
Rasesh Modybe7fa322010-12-23 21:45:01 +0000550 } else {
551 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
552 bna_ib_ack(ccb->i_dbell, 0);
553 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700554
555 return packets;
556}
557
558static void
559bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
560{
Rasesh Modybe7fa322010-12-23 21:45:01 +0000561 if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
562 return;
563
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700564 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
565 bna_ib_ack(ccb->i_dbell, 0);
566}
567
568static void
569bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
570{
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000571 unsigned long flags;
572
Rasesh Modyaad75b62010-12-23 21:45:08 +0000573 /* Because of polling context */
574 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700575 bnad_enable_rx_irq_unsafe(ccb);
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000576 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700577}
578
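/*
 * Schedule NAPI poll for this CCB: quiesce its IB interrupt
 * (coalescing timer set to 0 plus an ack) and hand processing over
 * to bnad_napi_poll_rx().
 */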
579static void
580bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
581{
582 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
Rasesh Modybe7fa322010-12-23 21:45:01 +0000583 struct napi_struct *napi = &rx_ctrl->napi;
584
585 if (likely(napi_schedule_prep(napi))) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700586 bnad_disable_rx_irq(bnad, ccb);
Rasesh Modybe7fa322010-12-23 21:45:01 +0000587 __napi_schedule(napi);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700588 }
589 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
590}
591
592/* MSIX Rx Path Handler */
593static irqreturn_t
594bnad_msix_rx(int irq, void *data)
595{
596 struct bna_ccb *ccb = (struct bna_ccb *)data;
597 struct bnad *bnad = ccb->bnad;
598
599 bnad_netif_rx_schedule_poll(bnad, ccb);
600
601 return IRQ_HANDLED;
602}
603
604/* Interrupt handlers */
605
606/* Mbox Interrupt Handlers */
607static irqreturn_t
608bnad_msix_mbox_handler(int irq, void *data)
609{
610 u32 intr_status;
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000611 unsigned long flags;
Rasesh Modybe7fa322010-12-23 21:45:01 +0000612 struct bnad *bnad = (struct bnad *)data;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700613
Rasesh Modybe7fa322010-12-23 21:45:01 +0000614 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
615 return IRQ_HANDLED;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700616
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700617 spin_lock_irqsave(&bnad->bna_lock, flags);
618
619 bna_intr_status_get(&bnad->bna, intr_status);
620
621 if (BNA_IS_MBOX_ERR_INTR(intr_status))
622 bna_mbox_handler(&bnad->bna, intr_status);
623
624 spin_unlock_irqrestore(&bnad->bna_lock, flags);
625
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700626 return IRQ_HANDLED;
627}
628
629static irqreturn_t
630bnad_isr(int irq, void *data)
631{
632 int i, j;
633 u32 intr_status;
634 unsigned long flags;
Rasesh Modybe7fa322010-12-23 21:45:01 +0000635 struct bnad *bnad = (struct bnad *)data;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700636 struct bnad_rx_info *rx_info;
637 struct bnad_rx_ctrl *rx_ctrl;
638
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000639 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
640 return IRQ_NONE;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700641
642 bna_intr_status_get(&bnad->bna, intr_status);
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000643
644 if (unlikely(!intr_status))
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700645 return IRQ_NONE;
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000646
647 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700648
Rasesh Modybe7fa322010-12-23 21:45:01 +0000649 if (BNA_IS_MBOX_ERR_INTR(intr_status))
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700650 bna_mbox_handler(&bnad->bna, intr_status);
Rasesh Modybe7fa322010-12-23 21:45:01 +0000651
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700652 spin_unlock_irqrestore(&bnad->bna_lock, flags);
653
Rasesh Modybe7fa322010-12-23 21:45:01 +0000654 if (!BNA_IS_INTX_DATA_INTR(intr_status))
655 return IRQ_HANDLED;
656
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700657 /* Process data interrupts */
Rasesh Modybe7fa322010-12-23 21:45:01 +0000658 /* Tx processing */
659 for (i = 0; i < bnad->num_tx; i++) {
660 for (j = 0; j < bnad->num_txq_per_tx; j++)
661 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
662 }
663 /* Rx processing */
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700664 for (i = 0; i < bnad->num_rx; i++) {
665 rx_info = &bnad->rx_info[i];
666 if (!rx_info->rx)
667 continue;
668 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
669 rx_ctrl = &rx_info->rx_ctrl[j];
670 if (rx_ctrl->ccb)
671 bnad_netif_rx_schedule_poll(bnad,
672 rx_ctrl->ccb);
673 }
674 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700675 return IRQ_HANDLED;
676}
677
678/*
679 * Called in interrupt / callback context
680 * with bna_lock held, so cfg_flags access is OK
681 */
682static void
683bnad_enable_mbox_irq(struct bnad *bnad)
684{
Rasesh Modybe7fa322010-12-23 21:45:01 +0000685 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000686
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700687 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
688}
689
690/*
 * Called with bnad->bna_lock held because of
692 * bnad->cfg_flags access.
693 */
Rasesh Modyb7ee31c52010-10-05 15:46:05 +0000694static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700695bnad_disable_mbox_irq(struct bnad *bnad)
696{
Rasesh Modybe7fa322010-12-23 21:45:01 +0000697 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
Rasesh Modye2fa6f22010-10-05 15:46:04 +0000698
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700699 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
700}
701
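/*
 * Copy the adapter's permanent MAC address to netdev->perm_addr; also
 * use it as dev_addr if no address has been set yet.
 */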
Rasesh Modybe7fa322010-12-23 21:45:01 +0000702static void
703bnad_set_netdev_perm_addr(struct bnad *bnad)
704{
705 struct net_device *netdev = bnad->netdev;
706
707 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
708 if (is_zero_ether_addr(netdev->dev_addr))
709 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
710}
711
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700712/* Control Path Handlers */
713
714/* Callbacks */
715void
716bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
717{
718 bnad_enable_mbox_irq(bnad);
719}
720
721void
722bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
723{
724 bnad_disable_mbox_irq(bnad);
725}
726
727void
728bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
729{
730 complete(&bnad->bnad_completions.ioc_comp);
731 bnad->bnad_completions.ioc_comp_status = status;
732}
733
734void
735bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
736{
737 complete(&bnad->bnad_completions.ioc_comp);
738 bnad->bnad_completions.ioc_comp_status = status;
739}
740
741static void
742bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
743{
744 struct bnad *bnad = (struct bnad *)arg;
745
746 complete(&bnad->bnad_completions.port_comp);
747
748 netif_carrier_off(bnad->netdev);
749}
750
751void
752bnad_cb_port_link_status(struct bnad *bnad,
753 enum bna_link_status link_status)
754{
755 bool link_up = 0;
756
757 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
758
759 if (link_status == BNA_CEE_UP) {
760 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
761 BNAD_UPDATE_CTR(bnad, cee_up);
762 } else
763 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
764
765 if (link_up) {
766 if (!netif_carrier_ok(bnad->netdev)) {
Rasesh Modybe7fa322010-12-23 21:45:01 +0000767 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
768 if (!tcb)
769 return;
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700770 pr_warn("bna: %s link up\n",
771 bnad->netdev->name);
772 netif_carrier_on(bnad->netdev);
773 BNAD_UPDATE_CTR(bnad, link_toggle);
Rasesh Modybe7fa322010-12-23 21:45:01 +0000774 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700775 /* Force an immediate Transmit Schedule */
776 pr_info("bna: %s TX_STARTED\n",
777 bnad->netdev->name);
778 netif_wake_queue(bnad->netdev);
779 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
780 } else {
781 netif_stop_queue(bnad->netdev);
782 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
783 }
784 }
785 } else {
786 if (netif_carrier_ok(bnad->netdev)) {
787 pr_warn("bna: %s link down\n",
788 bnad->netdev->name);
789 netif_carrier_off(bnad->netdev);
790 BNAD_UPDATE_CTR(bnad, link_toggle);
791 }
792 }
793}
794
795static void
796bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
797 enum bna_cb_status status)
798{
799 struct bnad *bnad = (struct bnad *)arg;
800
801 complete(&bnad->bnad_completions.tx_comp);
802}
803
804static void
805bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
806{
807 struct bnad_tx_info *tx_info =
808 (struct bnad_tx_info *)tcb->txq->tx->priv;
809 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
810
811 tx_info->tcb[tcb->id] = tcb;
812 unmap_q->producer_index = 0;
813 unmap_q->consumer_index = 0;
814 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
815}
816
817static void
818bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
819{
820 struct bnad_tx_info *tx_info =
821 (struct bnad_tx_info *)tcb->txq->tx->priv;
Rasesh Modybe7fa322010-12-23 21:45:01 +0000822 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
823
824 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
825 cpu_relax();
826
827 bnad_free_all_txbufs(bnad, tcb);
828
829 unmap_q->producer_index = 0;
830 unmap_q->consumer_index = 0;
831
832 smp_mb__before_clear_bit();
833 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700834
835 tx_info->tcb[tcb->id] = NULL;
836}
837
838static void
839bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
840{
841 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
842
843 unmap_q->producer_index = 0;
844 unmap_q->consumer_index = 0;
845 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
846}
847
848static void
Rasesh Modybe7fa322010-12-23 21:45:01 +0000849bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
850{
851 bnad_free_all_rxbufs(bnad, rcb);
852}
853
854static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700855bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
856{
857 struct bnad_rx_info *rx_info =
858 (struct bnad_rx_info *)ccb->cq->rx->priv;
859
860 rx_info->rx_ctrl[ccb->id].ccb = ccb;
861 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
862}
863
864static void
865bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
866{
867 struct bnad_rx_info *rx_info =
868 (struct bnad_rx_info *)ccb->cq->rx->priv;
869
870 rx_info->rx_ctrl[ccb->id].ccb = NULL;
871}
872
873static void
874bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
875{
876 struct bnad_tx_info *tx_info =
877 (struct bnad_tx_info *)tcb->txq->tx->priv;
878
879 if (tx_info != &bnad->tx_info[0])
880 return;
881
Rasesh Modybe7fa322010-12-23 21:45:01 +0000882 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700883 netif_stop_queue(bnad->netdev);
884 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
885}
886
887static void
888bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
889{
Rasesh Modybe7fa322010-12-23 21:45:01 +0000890 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
891
892 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700893 return;
894
Rasesh Modybe7fa322010-12-23 21:45:01 +0000895 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
896
897 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
898 cpu_relax();
899
900 bnad_free_all_txbufs(bnad, tcb);
901
902 unmap_q->producer_index = 0;
903 unmap_q->consumer_index = 0;
904
905 smp_mb__before_clear_bit();
906 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
907
908 /*
909 * Workaround for first device enable failure & we
910 * get a 0 MAC address. We try to get the MAC address
911 * again here.
912 */
913 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
914 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
915 bnad_set_netdev_perm_addr(bnad);
916 }
917
918 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
919
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700920 if (netif_carrier_ok(bnad->netdev)) {
921 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
922 netif_wake_queue(bnad->netdev);
923 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
924 }
925}
926
927static void
928bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
929{
Rasesh Modybe7fa322010-12-23 21:45:01 +0000930 /* Delay only once for the whole Tx Path Shutdown */
931 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
932 mdelay(BNAD_TXRX_SYNC_MDELAY);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700933}
934
935static void
936bnad_cb_rx_cleanup(struct bnad *bnad,
937 struct bna_ccb *ccb)
938{
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700939 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
940
Rasesh Modybe7fa322010-12-23 21:45:01 +0000941 if (ccb->rcb[1])
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700942 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
Rasesh Modybe7fa322010-12-23 21:45:01 +0000943
944 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
945 mdelay(BNAD_TXRX_SYNC_MDELAY);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700946}
947
948static void
949bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
950{
951 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
952
Rasesh Modybe7fa322010-12-23 21:45:01 +0000953 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
954
955 if (rcb == rcb->cq->ccb->rcb[0])
956 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
957
958 bnad_free_all_rxbufs(bnad, rcb);
959
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700960 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
961
962 /* Now allocate & post buffers for this RCB */
963 /* !!Allocation in callback context */
964 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
965 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
966 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
967 bnad_alloc_n_post_rxbufs(bnad, rcb);
968 smp_mb__before_clear_bit();
969 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
970 }
971}
972
973static void
974bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
975 enum bna_cb_status status)
976{
977 struct bnad *bnad = (struct bnad *)arg;
978
979 complete(&bnad->bnad_completions.rx_comp);
980}
981
982static void
983bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
984 enum bna_cb_status status)
985{
986 bnad->bnad_completions.mcast_comp_status = status;
987 complete(&bnad->bnad_completions.mcast_comp);
988}
989
990void
991bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
992 struct bna_stats *stats)
993{
994 if (status == BNA_CB_SUCCESS)
995 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
996
997 if (!netif_running(bnad->netdev) ||
998 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
999 return;
1000
1001 mod_timer(&bnad->stats_timer,
1002 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1003}
1004
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001005/* Resource allocation, free functions */
1006
1007static void
1008bnad_mem_free(struct bnad *bnad,
1009 struct bna_mem_info *mem_info)
1010{
1011 int i;
1012 dma_addr_t dma_pa;
1013
1014 if (mem_info->mdl == NULL)
1015 return;
1016
1017 for (i = 0; i < mem_info->num; i++) {
1018 if (mem_info->mdl[i].kva != NULL) {
1019 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1020 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1021 dma_pa);
Ivan Vecera5ea74312011-02-02 04:37:02 +00001022 dma_free_coherent(&bnad->pcidev->dev,
1023 mem_info->mdl[i].len,
1024 mem_info->mdl[i].kva, dma_pa);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001025 } else
1026 kfree(mem_info->mdl[i].kva);
1027 }
1028 }
1029 kfree(mem_info->mdl);
1030 mem_info->mdl = NULL;
1031}
1032
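/*
 * Allocate the memory blocks described by mem_info: DMA-coherent
 * buffers for BNA_MEM_T_DMA, kzalloc'ed kernel memory otherwise.
 */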
1033static int
1034bnad_mem_alloc(struct bnad *bnad,
1035 struct bna_mem_info *mem_info)
1036{
1037 int i;
1038 dma_addr_t dma_pa;
1039
1040 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1041 mem_info->mdl = NULL;
1042 return 0;
1043 }
1044
1045 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1046 GFP_KERNEL);
1047 if (mem_info->mdl == NULL)
1048 return -ENOMEM;
1049
1050 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1051 for (i = 0; i < mem_info->num; i++) {
1052 mem_info->mdl[i].len = mem_info->len;
1053 mem_info->mdl[i].kva =
Ivan Vecera5ea74312011-02-02 04:37:02 +00001054 dma_alloc_coherent(&bnad->pcidev->dev,
1055 mem_info->len, &dma_pa,
1056 GFP_KERNEL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001057
1058 if (mem_info->mdl[i].kva == NULL)
1059 goto err_return;
1060
1061 BNA_SET_DMA_ADDR(dma_pa,
1062 &(mem_info->mdl[i].dma));
1063 }
1064 } else {
1065 for (i = 0; i < mem_info->num; i++) {
1066 mem_info->mdl[i].len = mem_info->len;
1067 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1068 GFP_KERNEL);
1069 if (mem_info->mdl[i].kva == NULL)
1070 goto err_return;
1071 }
1072 }
1073
1074 return 0;
1075
1076err_return:
1077 bnad_mem_free(bnad, mem_info);
1078 return -ENOMEM;
1079}
1080
1081/* Free IRQ for Mailbox */
1082static void
1083bnad_mbox_irq_free(struct bnad *bnad,
1084 struct bna_intr_info *intr_info)
1085{
1086 int irq;
1087 unsigned long flags;
1088
1089 if (intr_info->idl == NULL)
1090 return;
1091
1092 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001093 bnad_disable_mbox_irq(bnad);
Rasesh Modye2fa6f22010-10-05 15:46:04 +00001094 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001095
1096 irq = BNAD_GET_MBOX_IRQ(bnad);
Rasesh Modybe7fa322010-12-23 21:45:01 +00001097 free_irq(irq, bnad);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001098
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001099 kfree(intr_info->idl);
1100}
1101
1102/*
1103 * Allocates IRQ for Mailbox, but keep it disabled
1104 * This will be enabled once we get the mbox enable callback
1105 * from bna
1106 */
1107static int
1108bnad_mbox_irq_alloc(struct bnad *bnad,
1109 struct bna_intr_info *intr_info)
1110{
Rasesh Modybe7fa322010-12-23 21:45:01 +00001111 int err = 0;
Shyam Iyer5f778982011-06-28 08:58:05 +00001112 unsigned long irq_flags = 0, flags;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001113 u32 irq;
1114 irq_handler_t irq_handler;
1115
1116 /* Mbox should use only 1 vector */
1117
1118 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1119 if (!intr_info->idl)
1120 return -ENOMEM;
1121
1122 spin_lock_irqsave(&bnad->bna_lock, flags);
1123 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1124 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1125 irq = bnad->msix_table[bnad->msix_num - 1].vector;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001126 intr_info->intr_type = BNA_INTR_T_MSIX;
1127 intr_info->idl[0].vector = bnad->msix_num - 1;
1128 } else {
1129 irq_handler = (irq_handler_t)bnad_isr;
1130 irq = bnad->pcidev->irq;
Shyam Iyer5f778982011-06-28 08:58:05 +00001131 irq_flags = IRQF_SHARED;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001132 intr_info->intr_type = BNA_INTR_T_INTX;
1133 /* intr_info->idl.vector = 0 ? */
1134 }
1135 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Shyam Iyer5f778982011-06-28 08:58:05 +00001136 flags = irq_flags;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001137 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1138
Rasesh Modye2fa6f22010-10-05 15:46:04 +00001139 /*
1140 * Set the Mbox IRQ disable flag, so that the IRQ handler
1141 * called from request_irq() for SHARED IRQs do not execute
1142 */
1143 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1144
Rasesh Modybe7fa322010-12-23 21:45:01 +00001145 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1146
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001147 err = request_irq(irq, irq_handler, flags,
Rasesh Modybe7fa322010-12-23 21:45:01 +00001148 bnad->mbox_irq_name, bnad);
Rasesh Modye2fa6f22010-10-05 15:46:04 +00001149
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001150 if (err) {
1151 kfree(intr_info->idl);
1152 intr_info->idl = NULL;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001153 }
1154
Rasesh Modybe7fa322010-12-23 21:45:01 +00001155 return err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001156}
1157
1158static void
1159bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1160{
1161 kfree(intr_info->idl);
1162 intr_info->idl = NULL;
1163}
1164
1165/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1166static int
1167bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1168 uint txrx_id, struct bna_intr_info *intr_info)
1169{
1170 int i, vector_start = 0;
1171 u32 cfg_flags;
1172 unsigned long flags;
1173
1174 spin_lock_irqsave(&bnad->bna_lock, flags);
1175 cfg_flags = bnad->cfg_flags;
1176 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1177
1178 if (cfg_flags & BNAD_CF_MSIX) {
1179 intr_info->intr_type = BNA_INTR_T_MSIX;
1180 intr_info->idl = kcalloc(intr_info->num,
1181 sizeof(struct bna_intr_descr),
1182 GFP_KERNEL);
1183 if (!intr_info->idl)
1184 return -ENOMEM;
1185
1186 switch (src) {
1187 case BNAD_INTR_TX:
1188 vector_start = txrx_id;
1189 break;
1190
1191 case BNAD_INTR_RX:
1192 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1193 txrx_id;
1194 break;
1195
1196 default:
1197 BUG();
1198 }
1199
1200 for (i = 0; i < intr_info->num; i++)
1201 intr_info->idl[i].vector = vector_start + i;
1202 } else {
1203 intr_info->intr_type = BNA_INTR_T_INTX;
1204 intr_info->num = 1;
1205 intr_info->idl = kcalloc(intr_info->num,
1206 sizeof(struct bna_intr_descr),
1207 GFP_KERNEL);
1208 if (!intr_info->idl)
1209 return -ENOMEM;
1210
1211 switch (src) {
1212 case BNAD_INTR_TX:
1213 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1214 break;
1215
1216 case BNAD_INTR_RX:
1217 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1218 break;
1219 }
1220 }
1221 return 0;
1222}
1223
1224/**
1225 * NOTE: Should be called for MSIX only
1226 * Unregisters Tx MSIX vector(s) from the kernel
1227 */
1228static void
1229bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1230 int num_txqs)
1231{
1232 int i;
1233 int vector_num;
1234
1235 for (i = 0; i < num_txqs; i++) {
1236 if (tx_info->tcb[i] == NULL)
1237 continue;
1238
1239 vector_num = tx_info->tcb[i]->intr_vector;
1240 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1241 }
1242}
1243
1244/**
1245 * NOTE: Should be called for MSIX only
1246 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1247 */
1248static int
1249bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1250 uint tx_id, int num_txqs)
1251{
1252 int i;
1253 int err;
1254 int vector_num;
1255
1256 for (i = 0; i < num_txqs; i++) {
1257 vector_num = tx_info->tcb[i]->intr_vector;
1258 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1259 tx_id + tx_info->tcb[i]->id);
1260 err = request_irq(bnad->msix_table[vector_num].vector,
1261 (irq_handler_t)bnad_msix_tx, 0,
1262 tx_info->tcb[i]->name,
1263 tx_info->tcb[i]);
1264 if (err)
1265 goto err_return;
1266 }
1267
1268 return 0;
1269
1270err_return:
1271 if (i > 0)
1272 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1273 return -1;
1274}
1275
1276/**
1277 * NOTE: Should be called for MSIX only
1278 * Unregisters Rx MSIX vector(s) from the kernel
1279 */
1280static void
1281bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1282 int num_rxps)
1283{
1284 int i;
1285 int vector_num;
1286
1287 for (i = 0; i < num_rxps; i++) {
1288 if (rx_info->rx_ctrl[i].ccb == NULL)
1289 continue;
1290
1291 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1292 free_irq(bnad->msix_table[vector_num].vector,
1293 rx_info->rx_ctrl[i].ccb);
1294 }
1295}
1296
1297/**
1298 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1300 */
1301static int
1302bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1303 uint rx_id, int num_rxps)
1304{
1305 int i;
1306 int err;
1307 int vector_num;
1308
1309 for (i = 0; i < num_rxps; i++) {
1310 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1311 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1312 bnad->netdev->name,
1313 rx_id + rx_info->rx_ctrl[i].ccb->id);
1314 err = request_irq(bnad->msix_table[vector_num].vector,
1315 (irq_handler_t)bnad_msix_rx, 0,
1316 rx_info->rx_ctrl[i].ccb->name,
1317 rx_info->rx_ctrl[i].ccb);
1318 if (err)
1319 goto err_return;
1320 }
1321
1322 return 0;
1323
1324err_return:
1325 if (i > 0)
1326 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1327 return -1;
1328}
1329
1330/* Free Tx object Resources */
1331static void
1332bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1333{
1334 int i;
1335
1336 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1337 if (res_info[i].res_type == BNA_RES_T_MEM)
1338 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1339 else if (res_info[i].res_type == BNA_RES_T_INTR)
1340 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1341 }
1342}
1343
1344/* Allocates memory and interrupt resources for Tx object */
1345static int
1346bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1347 uint tx_id)
1348{
1349 int i, err = 0;
1350
1351 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1352 if (res_info[i].res_type == BNA_RES_T_MEM)
1353 err = bnad_mem_alloc(bnad,
1354 &res_info[i].res_u.mem_info);
1355 else if (res_info[i].res_type == BNA_RES_T_INTR)
1356 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1357 &res_info[i].res_u.intr_info);
1358 if (err)
1359 goto err_return;
1360 }
1361 return 0;
1362
1363err_return:
1364 bnad_tx_res_free(bnad, res_info);
1365 return err;
1366}
1367
1368/* Free Rx object Resources */
1369static void
1370bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1371{
1372 int i;
1373
1374 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1375 if (res_info[i].res_type == BNA_RES_T_MEM)
1376 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1377 else if (res_info[i].res_type == BNA_RES_T_INTR)
1378 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1379 }
1380}
1381
1382/* Allocates memory and interrupt resources for Rx object */
1383static int
1384bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1385 uint rx_id)
1386{
1387 int i, err = 0;
1388
1389 /* All memory needs to be allocated before setup_ccbs */
1390 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1391 if (res_info[i].res_type == BNA_RES_T_MEM)
1392 err = bnad_mem_alloc(bnad,
1393 &res_info[i].res_u.mem_info);
1394 else if (res_info[i].res_type == BNA_RES_T_INTR)
1395 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1396 &res_info[i].res_u.intr_info);
1397 if (err)
1398 goto err_return;
1399 }
1400 return 0;
1401
1402err_return:
1403 bnad_rx_res_free(bnad, res_info);
1404 return err;
1405}
1406
1407/* Timer callbacks */
1408/* a) IOC timer */
1409static void
1410bnad_ioc_timeout(unsigned long data)
1411{
1412 struct bnad *bnad = (struct bnad *)data;
1413 unsigned long flags;
1414
1415 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody8a891422010-08-25 23:00:27 -07001416 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001417 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1418}
1419
1420static void
1421bnad_ioc_hb_check(unsigned long data)
1422{
1423 struct bnad *bnad = (struct bnad *)data;
1424 unsigned long flags;
1425
1426 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody8a891422010-08-25 23:00:27 -07001427 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001428 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1429}
1430
1431static void
Rasesh Mody1d32f762010-12-23 21:45:09 +00001432bnad_iocpf_timeout(unsigned long data)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001433{
1434 struct bnad *bnad = (struct bnad *)data;
1435 unsigned long flags;
1436
1437 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody1d32f762010-12-23 21:45:09 +00001438 bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
1439 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1440}
1441
1442static void
1443bnad_iocpf_sem_timeout(unsigned long data)
1444{
1445 struct bnad *bnad = (struct bnad *)data;
1446 unsigned long flags;
1447
1448 spin_lock_irqsave(&bnad->bna_lock, flags);
1449 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001450 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1451}
1452
1453/*
1454 * All timer routines use bnad->bna_lock to protect against
1455 * the following race, which may occur in case of no locking:
1456 * Time CPU m CPU n
1457 * 0 1 = test_bit
1458 * 1 clear_bit
1459 * 2 del_timer_sync
1460 * 3 mod_timer
1461 */
1462
1463/* b) Dynamic Interrupt Moderation Timer */
1464static void
1465bnad_dim_timeout(unsigned long data)
1466{
1467 struct bnad *bnad = (struct bnad *)data;
1468 struct bnad_rx_info *rx_info;
1469 struct bnad_rx_ctrl *rx_ctrl;
1470 int i, j;
1471 unsigned long flags;
1472
1473 if (!netif_carrier_ok(bnad->netdev))
1474 return;
1475
1476 spin_lock_irqsave(&bnad->bna_lock, flags);
1477 for (i = 0; i < bnad->num_rx; i++) {
1478 rx_info = &bnad->rx_info[i];
1479 if (!rx_info->rx)
1480 continue;
1481 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1482 rx_ctrl = &rx_info->rx_ctrl[j];
1483 if (!rx_ctrl->ccb)
1484 continue;
1485 bna_rx_dim_update(rx_ctrl->ccb);
1486 }
1487 }
1488
	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1490 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1491 mod_timer(&bnad->dim_timer,
1492 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1493 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1494}
1495
1496/* c) Statistics Timer */
1497static void
1498bnad_stats_timeout(unsigned long data)
1499{
1500 struct bnad *bnad = (struct bnad *)data;
1501 unsigned long flags;
1502
1503 if (!netif_running(bnad->netdev) ||
1504 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1505 return;
1506
1507 spin_lock_irqsave(&bnad->bna_lock, flags);
1508 bna_stats_get(&bnad->bna);
1509 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1510}
1511
1512/*
1513 * Set up timer for DIM
1514 * Called with bnad->bna_lock held
1515 */
1516void
1517bnad_dim_timer_start(struct bnad *bnad)
1518{
1519 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1520 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1521 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1522 (unsigned long)bnad);
1523 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1524 mod_timer(&bnad->dim_timer,
1525 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1526 }
1527}
1528
1529/*
1530 * Set up timer for statistics
1531 * Called with mutex_lock(&bnad->conf_mutex) held
1532 */
1533static void
1534bnad_stats_timer_start(struct bnad *bnad)
1535{
1536 unsigned long flags;
1537
1538 spin_lock_irqsave(&bnad->bna_lock, flags);
1539 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1540 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1541 (unsigned long)bnad);
1542 mod_timer(&bnad->stats_timer,
1543 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1544 }
1545 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001546}
1547
1548/*
1549 * Stops the stats timer
1550 * Called with mutex_lock(&bnad->conf_mutex) held
1551 */
1552static void
1553bnad_stats_timer_stop(struct bnad *bnad)
1554{
1555 int to_del = 0;
1556 unsigned long flags;
1557
1558 spin_lock_irqsave(&bnad->bna_lock, flags);
1559 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1560 to_del = 1;
1561 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1562 if (to_del)
1563 del_timer_sync(&bnad->stats_timer);
1564}
1565
1566/* Utilities */
1567
1568static void
1569bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1570{
1571 int i = 1; /* Index 0 has broadcast address */
1572 struct netdev_hw_addr *mc_addr;
1573
1574 netdev_for_each_mc_addr(mc_addr, netdev) {
1575 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1576 ETH_ALEN);
1577 i++;
1578 }
1579}
1580
1581static int
1582bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1583{
1584 struct bnad_rx_ctrl *rx_ctrl =
1585 container_of(napi, struct bnad_rx_ctrl, napi);
1586 struct bna_ccb *ccb;
1587 struct bnad *bnad;
1588 int rcvd = 0;
1589
1590 ccb = rx_ctrl->ccb;
1591
1592 bnad = ccb->bnad;
1593
1594 if (!netif_carrier_ok(bnad->netdev))
1595 goto poll_exit;
1596
1597 rcvd = bnad_poll_cq(bnad, ccb, budget);
1598 if (rcvd == budget)
1599 return rcvd;
1600
1601poll_exit:
1602 napi_complete((napi));
1603
1604 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1605
1606 bnad_enable_rx_irq(bnad, ccb);
1607 return rcvd;
1608}
1609
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001610static void
1611bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1612{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001613 struct bnad_rx_ctrl *rx_ctrl;
1614 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001615
1616 /* Initialize & enable NAPI */
1617 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1618 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
Rasesh Modybe7fa322010-12-23 21:45:01 +00001619
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001620 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
Rasesh Modybe7fa322010-12-23 21:45:01 +00001621 bnad_napi_poll_rx, 64);
1622
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001623 napi_enable(&rx_ctrl->napi);
1624 }
1625}
1626
1627static void
1628bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1629{
1630 int i;
1631
1632 /* First disable and then clean up */
1633 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1634 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1635 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1636 }
1637}
1638
/* Should be called with conf_lock held */
1640void
1641bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1642{
1643 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1644 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1645 unsigned long flags;
1646
1647 if (!tx_info->tx)
1648 return;
1649
1650 init_completion(&bnad->bnad_completions.tx_comp);
1651 spin_lock_irqsave(&bnad->bna_lock, flags);
1652 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1653 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1654 wait_for_completion(&bnad->bnad_completions.tx_comp);
1655
1656 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1657 bnad_tx_msix_unregister(bnad, tx_info,
1658 bnad->num_txq_per_tx);
1659
1660 spin_lock_irqsave(&bnad->bna_lock, flags);
1661 bna_tx_destroy(tx_info->tx);
1662 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1663
1664 tx_info->tx = NULL;
1665
1666 if (0 == tx_id)
1667 tasklet_kill(&bnad->tx_free_tasklet);
1668
1669 bnad_tx_res_free(bnad, res_info);
1670}
1671
/* Should be called with conf_lock held */
1673int
1674bnad_setup_tx(struct bnad *bnad, uint tx_id)
1675{
1676 int err;
1677 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1678 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1679 struct bna_intr_info *intr_info =
1680 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1681 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1682 struct bna_tx_event_cbfn tx_cbfn;
1683 struct bna_tx *tx;
1684 unsigned long flags;
1685
1686 /* Initialize the Tx object configuration */
1687 tx_config->num_txq = bnad->num_txq_per_tx;
1688 tx_config->txq_depth = bnad->txq_depth;
1689 tx_config->tx_type = BNA_TX_T_REGULAR;
1690
1691 /* Initialize the tx event handlers */
1692 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1693 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1694 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1695 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1696 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1697
1698 /* Get BNA's resource requirement for one tx object */
1699 spin_lock_irqsave(&bnad->bna_lock, flags);
1700 bna_tx_res_req(bnad->num_txq_per_tx,
1701 bnad->txq_depth, res_info);
1702 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1703
1704 /* Fill Unmap Q memory requirements */
1705 BNAD_FILL_UNMAPQ_MEM_REQ(
1706 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1707 bnad->num_txq_per_tx,
1708 BNAD_TX_UNMAPQ_DEPTH);
1709
1710 /* Allocate resources */
1711 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1712 if (err)
1713 return err;
1714
1715 /* Ask BNA to create one Tx object, supplying required resources */
1716 spin_lock_irqsave(&bnad->bna_lock, flags);
1717 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1718 tx_info);
1719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1720 if (!tx)
1721 goto err_return;
1722 tx_info->tx = tx;
1723
1724 /* Register ISR for the Tx object */
1725 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1726 err = bnad_tx_msix_register(bnad, tx_info,
1727 tx_id, bnad->num_txq_per_tx);
1728 if (err)
1729 goto err_return;
1730 }
1731
1732 spin_lock_irqsave(&bnad->bna_lock, flags);
1733 bna_tx_enable(tx);
1734 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1735
1736 return 0;
1737
1738err_return:
1739 bnad_tx_res_free(bnad, res_info);
1740 return err;
1741}
1742
1743/* Setup the rx config for bna_rx_create */
1744/* bnad decides the configuration */
1745static void
1746bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1747{
1748 rx_config->rx_type = BNA_RX_T_REGULAR;
1749 rx_config->num_paths = bnad->num_rxp_per_rx;
1750
1751 if (bnad->num_rxp_per_rx > 1) {
1752 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1753 rx_config->rss_config.hash_type =
1754 (BFI_RSS_T_V4_TCP |
1755 BFI_RSS_T_V6_TCP |
1756 BFI_RSS_T_V4_IP |
1757 BFI_RSS_T_V6_IP);
1758 rx_config->rss_config.hash_mask =
1759 bnad->num_rxp_per_rx - 1;
1760 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1761 sizeof(rx_config->rss_config.toeplitz_hash_key));
1762 } else {
1763 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1764 memset(&rx_config->rss_config, 0,
1765 sizeof(rx_config->rss_config));
1766 }
1767 rx_config->rxp_type = BNA_RXP_SLR;
1768 rx_config->q_depth = bnad->rxq_depth;
1769
1770 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1771
1772 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1773}
1774
1775/* Called with mutex_lock(&bnad->conf_mutex) held */
1776void
1777bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1778{
1779 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1780 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1781 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1782 unsigned long flags;
1783 int dim_timer_del = 0;
1784
1785 if (!rx_info->rx)
1786 return;
1787
1788 if (0 == rx_id) {
1789 spin_lock_irqsave(&bnad->bna_lock, flags);
1790 dim_timer_del = bnad_dim_timer_running(bnad);
1791 if (dim_timer_del)
1792 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1793 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1794 if (dim_timer_del)
1795 del_timer_sync(&bnad->dim_timer);
1796 }
1797
1798 bnad_napi_disable(bnad, rx_id);
1799
1800 init_completion(&bnad->bnad_completions.rx_comp);
1801 spin_lock_irqsave(&bnad->bna_lock, flags);
1802 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1803 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1804 wait_for_completion(&bnad->bnad_completions.rx_comp);
1805
1806 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1807 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1808
1809 spin_lock_irqsave(&bnad->bna_lock, flags);
1810 bna_rx_destroy(rx_info->rx);
1811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1812
1813 rx_info->rx = NULL;
1814
1815 bnad_rx_res_free(bnad, res_info);
1816}
1817
1818/* Called with mutex_lock(&bnad->conf_mutex) held */
1819int
1820bnad_setup_rx(struct bnad *bnad, uint rx_id)
1821{
1822 int err;
1823 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1824 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1825 struct bna_intr_info *intr_info =
1826 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1827 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1828 struct bna_rx_event_cbfn rx_cbfn;
1829 struct bna_rx *rx;
1830 unsigned long flags;
1831
1832 /* Initialize the Rx object configuration */
1833 bnad_init_rx_config(bnad, rx_config);
1834
1835 /* Initialize the Rx event handlers */
1836 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
Rasesh Modybe7fa322010-12-23 21:45:01 +00001837 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001838 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1839 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1840 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1841 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1842
1843 /* Get BNA's resource requirement for one Rx object */
1844 spin_lock_irqsave(&bnad->bna_lock, flags);
1845 bna_rx_res_req(rx_config, res_info);
1846 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1847
1848 /* Fill Unmap Q memory requirements */
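	/*
	 * Non-single RxP types use two RxQs per path (e.g. small and large
	 * buffer queues), so twice as many unmap queues are reserved in
	 * that case; BNA_RXP_SINGLE needs only one per path.
	 */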
1849 BNAD_FILL_UNMAPQ_MEM_REQ(
1850 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1851 rx_config->num_paths +
1852 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1853 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1854
1855 /* Allocate resource */
1856 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1857 if (err)
1858 return err;
1859
1860 /* Ask BNA to create one Rx object, supplying required resources */
1861 spin_lock_irqsave(&bnad->bna_lock, flags);
1862 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1863 rx_info);
1864 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1865 if (!rx)
1866 goto err_return;
1867 rx_info->rx = rx;
1868
1869 /* Register ISR for the Rx object */
1870 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1871 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1872 rx_config->num_paths);
1873 if (err)
1874 goto err_return;
1875 }
1876
1877 /* Enable NAPI */
1878 bnad_napi_enable(bnad, rx_id);
1879
1880 spin_lock_irqsave(&bnad->bna_lock, flags);
1881 if (0 == rx_id) {
1882 /* Set up Dynamic Interrupt Moderation Vector */
1883 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1884 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1885
1886 /* Enable VLAN filtering only on the default Rx */
1887 bna_rx_vlanfilter_enable(rx);
1888
1889 /* Start the DIM timer */
1890 bnad_dim_timer_start(bnad);
1891 }
1892
1893 bna_rx_enable(rx);
1894 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1895
1896 return 0;
1897
1898err_return:
1899 bnad_cleanup_rx(bnad, rx_id);
1900 return err;
1901}
1902
1903/* Called with conf_lock & bnad->bna_lock held */
1904void
1905bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1906{
1907 struct bnad_tx_info *tx_info;
1908
1909 tx_info = &bnad->tx_info[0];
1910 if (!tx_info->tx)
1911 return;
1912
1913 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1914}
1915
1916/* Called with conf_lock & bnad->bna_lock held */
1917void
1918bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1919{
1920 struct bnad_rx_info *rx_info;
1921 int i;
1922
1923 for (i = 0; i < bnad->num_rx; i++) {
1924 rx_info = &bnad->rx_info[i];
1925 if (!rx_info->rx)
1926 continue;
1927 bna_rx_coalescing_timeo_set(rx_info->rx,
1928 bnad->rx_coalescing_timeo);
1929 }
1930}
1931
1932/*
1933 * Called with bnad->bna_lock held
1934 */
1935static int
1936bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1937{
1938 int ret;
1939
1940 if (!is_valid_ether_addr(mac_addr))
1941 return -EADDRNOTAVAIL;
1942
1943 /* If datapath is down, pretend everything went through */
1944 if (!bnad->rx_info[0].rx)
1945 return 0;
1946
1947 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1948 if (ret != BNA_CB_SUCCESS)
1949 return -EADDRNOTAVAIL;
1950
1951 return 0;
1952}
1953
1954/* Should be called with conf_lock held */
1955static int
1956bnad_enable_default_bcast(struct bnad *bnad)
1957{
1958 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1959 int ret;
1960 unsigned long flags;
1961
1962 init_completion(&bnad->bnad_completions.mcast_comp);
1963
1964 spin_lock_irqsave(&bnad->bna_lock, flags);
1965 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1966 bnad_cb_rx_mcast_add);
1967 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1968
1969 if (ret == BNA_CB_SUCCESS)
1970 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1971 else
1972 return -ENODEV;
1973
1974 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1975 return -ENODEV;
1976
1977 return 0;
1978}
1979
Rasesh Modyaad75b62010-12-23 21:45:08 +00001980/* Called with bnad_conf_lock() held */
1981static void
1982bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1983{
1984 u16 vlan_id;
1985 unsigned long flags;
1986
1987 if (!bnad->vlan_grp)
1988 return;
1989
1990 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1991
1992 for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
1993 if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
1994 continue;
1995 spin_lock_irqsave(&bnad->bna_lock, flags);
1996 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
1997 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1998 }
1999}
2000
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002001/* Statistics utilities */
2002void
Eric Dumazet250e0612010-09-02 12:45:02 -07002003bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002004{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002005 int i, j;
2006
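	/*
	 * Walk every Rx path and TxQ, accumulating per-queue packet and
	 * byte counters; a CQ may feed up to two RxQs (rcb[0]/rcb[1]),
	 * both of which are counted when present.
	 */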
2007 for (i = 0; i < bnad->num_rx; i++) {
2008 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2009 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002010 stats->rx_packets += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002011 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002012 stats->rx_bytes += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002013 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2014 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2015 bnad->rx_info[i].rx_ctrl[j].ccb->
2016 rcb[1]->rxq) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002017 stats->rx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002018 bnad->rx_info[i].rx_ctrl[j].
2019 ccb->rcb[1]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002020 stats->rx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002021 bnad->rx_info[i].rx_ctrl[j].
2022 ccb->rcb[1]->rxq->rx_bytes;
2023 }
2024 }
2025 }
2026 }
2027 for (i = 0; i < bnad->num_tx; i++) {
2028 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2029 if (bnad->tx_info[i].tcb[j]) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002030 stats->tx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002031 bnad->tx_info[i].tcb[j]->txq->tx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002032 stats->tx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002033 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2034 }
2035 }
2036 }
2037}
2038
2039/*
2040 * Must be called with the bna_lock held.
2041 */
2042void
Eric Dumazet250e0612010-09-02 12:45:02 -07002043bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002044{
2045 struct bfi_ll_stats_mac *mac_stats;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002046 u64 bmap;
2047 int i;
2048
2049 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
Eric Dumazet250e0612010-09-02 12:45:02 -07002050 stats->rx_errors =
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002051 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2052 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2053 mac_stats->rx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002054 stats->tx_errors = mac_stats->tx_fcs_error +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002055 mac_stats->tx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002056 stats->rx_dropped = mac_stats->rx_drop;
2057 stats->tx_dropped = mac_stats->tx_drop;
2058 stats->multicast = mac_stats->rx_multicast;
2059 stats->collisions = mac_stats->tx_total_collision;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002060
Eric Dumazet250e0612010-09-02 12:45:02 -07002061 stats->rx_length_errors = mac_stats->rx_frame_length_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002062
2063 /* receive ring buffer overflow ?? */
2064
Eric Dumazet250e0612010-09-02 12:45:02 -07002065 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2066 stats->rx_frame_errors = mac_stats->rx_alignment_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002067	/* receiver FIFO overrun */
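	/*
	 * Walk the active RxF bitmap; only the first active RxF's
	 * frame_drops count is reported (the loop breaks after the first
	 * set bit).
	 */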
2068 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2069 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2070 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2071 if (bmap & 1) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002072 stats->rx_fifo_errors +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002073 bnad->stats.bna_stats->
2074 hw_stats->rxf_stats[i].frame_drops;
2075 break;
2076 }
2077 bmap >>= 1;
2078 }
2079}
2080
2081static void
2082bnad_mbox_irq_sync(struct bnad *bnad)
2083{
2084 u32 irq;
2085 unsigned long flags;
2086
2087 spin_lock_irqsave(&bnad->bna_lock, flags);
2088 if (bnad->cfg_flags & BNAD_CF_MSIX)
2089 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2090 else
2091 irq = bnad->pcidev->irq;
2092 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2093
2094 synchronize_irq(irq);
2095}
2096
2097/* Utility used by bnad_start_xmit, for doing TSO */
2098static int
2099bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2100{
2101 int err;
2102
2103	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2104 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2105 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2106 if (skb_header_cloned(skb)) {
2107 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2108 if (err) {
2109 BNAD_UPDATE_CTR(bnad, tso_err);
2110 return err;
2111 }
2112 }
2113
2114 /*
2115 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2116 * excluding the length field.
2117 */
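	/*
	 * (The per-segment TCP length is presumably folded back in by the
	 * hardware when it computes the checksum of each generated segment,
	 * which is why the length is left out of the seed here.)
	 */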
2118 if (skb->protocol == htons(ETH_P_IP)) {
2119 struct iphdr *iph = ip_hdr(skb);
2120
2121 /* Do we really need these? */
2122 iph->tot_len = 0;
2123 iph->check = 0;
2124
2125 tcp_hdr(skb)->check =
2126 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2127 IPPROTO_TCP, 0);
2128 BNAD_UPDATE_CTR(bnad, tso4);
2129 } else {
2130 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2131
2132 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2133 ipv6h->payload_len = 0;
2134 tcp_hdr(skb)->check =
2135 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2136 IPPROTO_TCP, 0);
2137 BNAD_UPDATE_CTR(bnad, tso6);
2138 }
2139
2140 return 0;
2141}
2142
2143/*
2144 * Initialize Q numbers depending on Rx Paths
2145 * Called with bnad->bna_lock held, because of cfg_flags
2146 * access.
2147 */
2148static void
2149bnad_q_num_init(struct bnad *bnad)
2150{
2151 int rxps;
2152
2153 rxps = min((uint)num_online_cpus(),
2154 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2155
2156 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2157 rxps = 1; /* INTx */
2158
2159 bnad->num_rx = 1;
2160 bnad->num_tx = 1;
2161 bnad->num_rxp_per_rx = rxps;
2162 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2163}
2164
2165/*
2166 * Adjusts the Q numbers, given a number of msix vectors
2167 * Give preference to RSS (i.e. more Rx paths) over Tx priority queues;
2168 * in that case, use just 1 Tx queue.
2169 * Called with bnad->bna_lock held because of cfg_flags access
2170 */
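/*
 * Example (hypothetical numbers): with 8 MSI-X vectors, one TxQ and a
 * single mailbox vector, the remaining 6 vectors would all go to Rx
 * paths; with too few vectors, the driver falls back to 1 Rx path.
 */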
2171static void
2172bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2173{
2174 bnad->num_txq_per_tx = 1;
2175 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2176 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2177 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2178 bnad->num_rxp_per_rx = msix_vectors -
2179 (bnad->num_tx * bnad->num_txq_per_tx) -
2180 BNAD_MAILBOX_MSIX_VECTORS;
2181 } else
2182 bnad->num_rxp_per_rx = 1;
2183}
2184
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002185/* Enable / disable device */
2186static void
2187bnad_device_disable(struct bnad *bnad)
2188{
2189 unsigned long flags;
2190
2191 init_completion(&bnad->bnad_completions.ioc_comp);
2192
2193 spin_lock_irqsave(&bnad->bna_lock, flags);
2194 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2195 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2196
2197 wait_for_completion(&bnad->bnad_completions.ioc_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002198}
2199
2200static int
2201bnad_device_enable(struct bnad *bnad)
2202{
2203 int err = 0;
2204 unsigned long flags;
2205
2206 init_completion(&bnad->bnad_completions.ioc_comp);
2207
2208 spin_lock_irqsave(&bnad->bna_lock, flags);
2209 bna_device_enable(&bnad->bna.device);
2210 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2211
2212 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2213
2214 if (bnad->bnad_completions.ioc_comp_status)
2215 err = bnad->bnad_completions.ioc_comp_status;
2216
2217 return err;
2218}
2219
2220/* Free BNA resources */
2221static void
2222bnad_res_free(struct bnad *bnad)
2223{
2224 int i;
2225 struct bna_res_info *res_info = &bnad->res_info[0];
2226
2227 for (i = 0; i < BNA_RES_T_MAX; i++) {
2228 if (res_info[i].res_type == BNA_RES_T_MEM)
2229 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2230 else
2231 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2232 }
2233}
2234
2235/* Allocates memory and interrupt resources for BNA */
2236static int
2237bnad_res_alloc(struct bnad *bnad)
2238{
2239 int i, err;
2240 struct bna_res_info *res_info = &bnad->res_info[0];
2241
2242 for (i = 0; i < BNA_RES_T_MAX; i++) {
2243 if (res_info[i].res_type == BNA_RES_T_MEM)
2244 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2245 else
2246 err = bnad_mbox_irq_alloc(bnad,
2247 &res_info[i].res_u.intr_info);
2248 if (err)
2249 goto err_return;
2250 }
2251 return 0;
2252
2253err_return:
2254 bnad_res_free(bnad);
2255 return err;
2256}
2257
2258/* Interrupt enable / disable */
2259static void
2260bnad_enable_msix(struct bnad *bnad)
2261{
2262 int i, ret;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002263 unsigned long flags;
2264
2265 spin_lock_irqsave(&bnad->bna_lock, flags);
2266 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2267 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2268 return;
2269 }
2270 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2271
2272 if (bnad->msix_table)
2273 return;
2274
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002275 bnad->msix_table =
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002276 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002277
2278 if (!bnad->msix_table)
2279 goto intx_mode;
2280
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002281 for (i = 0; i < bnad->msix_num; i++)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002282 bnad->msix_table[i].entry = i;
2283
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002284 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002285 if (ret > 0) {
2286 /* Not enough MSI-X vectors. */
2287
2288 spin_lock_irqsave(&bnad->bna_lock, flags);
2289 /* ret = #of vectors that we got */
2290 bnad_q_num_adjust(bnad, ret);
2291 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2292
2293 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2294 + (bnad->num_rx
2295 * bnad->num_rxp_per_rx) +
2296 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002297
2298 /* Try once more with adjusted numbers */
2299 /* If this fails, fall back to INTx */
2300 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002301 bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002302 if (ret)
2303 goto intx_mode;
2304
2305 } else if (ret < 0)
2306 goto intx_mode;
2307 return;
2308
2309intx_mode:
2310
2311 kfree(bnad->msix_table);
2312 bnad->msix_table = NULL;
2313 bnad->msix_num = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002314 spin_lock_irqsave(&bnad->bna_lock, flags);
2315 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2316 bnad_q_num_init(bnad);
2317 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2318}
2319
2320static void
2321bnad_disable_msix(struct bnad *bnad)
2322{
2323 u32 cfg_flags;
2324 unsigned long flags;
2325
2326 spin_lock_irqsave(&bnad->bna_lock, flags);
2327 cfg_flags = bnad->cfg_flags;
2328 if (bnad->cfg_flags & BNAD_CF_MSIX)
2329 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2330 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2331
2332 if (cfg_flags & BNAD_CF_MSIX) {
2333 pci_disable_msix(bnad->pcidev);
2334 kfree(bnad->msix_table);
2335 bnad->msix_table = NULL;
2336 }
2337}
2338
2339/* Netdev entry points */
2340static int
2341bnad_open(struct net_device *netdev)
2342{
2343 int err;
2344 struct bnad *bnad = netdev_priv(netdev);
2345 struct bna_pause_config pause_config;
2346 int mtu;
2347 unsigned long flags;
2348
2349 mutex_lock(&bnad->conf_mutex);
2350
2351 /* Tx */
2352 err = bnad_setup_tx(bnad, 0);
2353 if (err)
2354 goto err_return;
2355
2356 /* Rx */
2357 err = bnad_setup_rx(bnad, 0);
2358 if (err)
2359 goto cleanup_tx;
2360
2361 /* Port */
2362 pause_config.tx_pause = 0;
2363 pause_config.rx_pause = 0;
2364
2365 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2366
2367 spin_lock_irqsave(&bnad->bna_lock, flags);
2368 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2369 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2370 bna_port_enable(&bnad->bna.port);
2371 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2372
2373 /* Enable broadcast */
2374 bnad_enable_default_bcast(bnad);
2375
Rasesh Modyaad75b62010-12-23 21:45:08 +00002376 /* Restore VLANs, if any */
2377 bnad_restore_vlans(bnad, 0);
2378
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002379 /* Set the UCAST address */
2380 spin_lock_irqsave(&bnad->bna_lock, flags);
2381 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2382 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2383
2384 /* Start the stats timer */
2385 bnad_stats_timer_start(bnad);
2386
2387 mutex_unlock(&bnad->conf_mutex);
2388
2389 return 0;
2390
2391cleanup_tx:
2392 bnad_cleanup_tx(bnad, 0);
2393
2394err_return:
2395 mutex_unlock(&bnad->conf_mutex);
2396 return err;
2397}
2398
2399static int
2400bnad_stop(struct net_device *netdev)
2401{
2402 struct bnad *bnad = netdev_priv(netdev);
2403 unsigned long flags;
2404
2405 mutex_lock(&bnad->conf_mutex);
2406
2407 /* Stop the stats timer */
2408 bnad_stats_timer_stop(bnad);
2409
2410 init_completion(&bnad->bnad_completions.port_comp);
2411
2412 spin_lock_irqsave(&bnad->bna_lock, flags);
2413 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2414 bnad_cb_port_disabled);
2415 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2416
2417 wait_for_completion(&bnad->bnad_completions.port_comp);
2418
2419 bnad_cleanup_tx(bnad, 0);
2420 bnad_cleanup_rx(bnad, 0);
2421
2422 /* Synchronize mailbox IRQ */
2423 bnad_mbox_irq_sync(bnad);
2424
2425 mutex_unlock(&bnad->conf_mutex);
2426
2427 return 0;
2428}
2429
2430/* TX */
2431/*
2432 * bnad_start_xmit : Netdev entry point for Transmit
2433 * Called under lock held by net_device
2434 */
2435static netdev_tx_t
2436bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2437{
2438 struct bnad *bnad = netdev_priv(netdev);
2439
2440 u16 txq_prod, vlan_tag = 0;
2441 u32 unmap_prod, wis, wis_used, wi_range;
2442 u32 vectors, vect_id, i, acked;
2443 u32 tx_id;
2444 int err;
2445
2446 struct bnad_tx_info *tx_info;
2447 struct bna_tcb *tcb;
2448 struct bnad_unmap_q *unmap_q;
2449 dma_addr_t dma_addr;
2450 struct bna_txq_entry *txqent;
2451 bna_txq_wi_ctrl_flag_t flags;
2452
2453 if (unlikely
2454 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2455 dev_kfree_skb(skb);
2456 return NETDEV_TX_OK;
2457 }
2458
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002459 tx_id = 0;
2460
2461 tx_info = &bnad->tx_info[tx_id];
2462 tcb = tx_info->tcb[tx_id];
2463 unmap_q = tcb->unmap_q;
2464
Rasesh Modybe7fa322010-12-23 21:45:01 +00002465 /*
2466 * Takes care of the Tx that is scheduled between clearing the flag
2467 * and the netif_stop_queue() call.
2468 */
2469 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2470 dev_kfree_skb(skb);
2471 return NETDEV_TX_OK;
2472 }
2473
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002474 vectors = 1 + skb_shinfo(skb)->nr_frags;
2475 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2476 dev_kfree_skb(skb);
2477 return NETDEV_TX_OK;
2478 }
2479 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2480 acked = 0;
2481 if (unlikely
2482 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2483 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2484 if ((u16) (*tcb->hw_consumer_index) !=
2485 tcb->consumer_index &&
2486 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2487 acked = bnad_free_txbufs(bnad, tcb);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002488 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2489 bna_ib_ack(tcb->i_dbell, acked);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002490 smp_mb__before_clear_bit();
2491 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2492 } else {
2493 netif_stop_queue(netdev);
2494 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2495 }
2496
2497 smp_mb();
2498 /*
2499 * Check again to deal with race condition between
2500 * netif_stop_queue here, and netif_wake_queue in
2501 * interrupt handler which is not inside netif tx lock.
2502 */
2503 if (likely
2504 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2505 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2506 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2507 return NETDEV_TX_BUSY;
2508 } else {
2509 netif_wake_queue(netdev);
2510 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2511 }
2512 }
2513
2514 unmap_prod = unmap_q->producer_index;
2515 wis_used = 1;
2516 vect_id = 0;
2517 flags = 0;
2518
2519 txq_prod = tcb->producer_index;
2520 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2521 BUG_ON(!(wi_range <= tcb->q_depth));
2522 txqent->hdr.wi.reserved = 0;
2523 txqent->hdr.wi.num_vectors = vectors;
2524 txqent->hdr.wi.opcode =
2525 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2526 BNA_TXQ_WI_SEND));
2527
Jesse Grosseab6d182010-10-20 13:56:03 +00002528 if (vlan_tx_tag_present(skb)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002529 vlan_tag = (u16) vlan_tx_tag_get(skb);
2530 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2531 }
2532 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
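		/*
		 * Build an 802.1Q-style tag: 3-bit priority in the top bits,
		 * the existing VLAN id (and CFI) in the low 13 bits.
		 */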
2533 vlan_tag =
2534 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2535 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2536 }
2537
2538 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2539
2540 if (skb_is_gso(skb)) {
2541 err = bnad_tso_prepare(bnad, skb);
2542 if (err) {
2543 dev_kfree_skb(skb);
2544 return NETDEV_TX_OK;
2545 }
2546 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2547 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2548 txqent->hdr.wi.l4_hdr_size_n_offset =
2549 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2550 (tcp_hdrlen(skb) >> 2,
2551 skb_transport_offset(skb)));
2552 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2553 u8 proto = 0;
2554
2555 txqent->hdr.wi.lso_mss = 0;
2556
2557 if (skb->protocol == htons(ETH_P_IP))
2558 proto = ip_hdr(skb)->protocol;
2559 else if (skb->protocol == htons(ETH_P_IPV6)) {
2560 /* nexthdr may not be TCP immediately. */
2561 proto = ipv6_hdr(skb)->nexthdr;
2562 }
2563 if (proto == IPPROTO_TCP) {
2564 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2565 txqent->hdr.wi.l4_hdr_size_n_offset =
2566 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2567 (0, skb_transport_offset(skb)));
2568
2569 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2570
2571 BUG_ON(!(skb_headlen(skb) >=
2572 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2573
2574 } else if (proto == IPPROTO_UDP) {
2575 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2576 txqent->hdr.wi.l4_hdr_size_n_offset =
2577 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2578 (0, skb_transport_offset(skb)));
2579
2580 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2581
2582 BUG_ON(!(skb_headlen(skb) >=
2583 skb_transport_offset(skb) +
2584 sizeof(struct udphdr)));
2585 } else {
2586 err = skb_checksum_help(skb);
2587 BNAD_UPDATE_CTR(bnad, csum_help);
2588 if (err) {
2589 dev_kfree_skb(skb);
2590 BNAD_UPDATE_CTR(bnad, csum_help_err);
2591 return NETDEV_TX_OK;
2592 }
2593 }
2594 } else {
2595 txqent->hdr.wi.lso_mss = 0;
2596 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2597 }
2598
2599 txqent->hdr.wi.flags = htons(flags);
2600
2601 txqent->hdr.wi.frame_length = htonl(skb->len);
2602
2603 unmap_q->unmap_array[unmap_prod].skb = skb;
2604 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2605 txqent->vector[vect_id].length = htons(skb_headlen(skb));
Ivan Vecera5ea74312011-02-02 04:37:02 +00002606 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2607 skb_headlen(skb), DMA_TO_DEVICE);
2608 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002609 dma_addr);
2610
2611 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2612 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2613
2614 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2615 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2616 u32 size = frag->size;
2617
2618 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2619 vect_id = 0;
2620 if (--wi_range)
2621 txqent++;
2622 else {
2623 BNA_QE_INDX_ADD(txq_prod, wis_used,
2624 tcb->q_depth);
2625 wis_used = 0;
2626 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2627 txqent, wi_range);
2628 BUG_ON(!(wi_range <= tcb->q_depth));
2629 }
2630 wis_used++;
2631 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2632 }
2633
2634 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2635 txqent->vector[vect_id].length = htons(size);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002636 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2637 frag->page_offset, size, DMA_TO_DEVICE);
2638 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002639 dma_addr);
2640 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2641 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2642 }
2643
2644 unmap_q->producer_index = unmap_prod;
2645 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2646 tcb->producer_index = txq_prod;
2647
2648 smp_mb();
Rasesh Modybe7fa322010-12-23 21:45:01 +00002649
2650 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2651 return NETDEV_TX_OK;
2652
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002653 bna_txq_prod_indx_doorbell(tcb);
2654
2655 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2656 tasklet_schedule(&bnad->tx_free_tasklet);
2657
2658 return NETDEV_TX_OK;
2659}
2660
2661/*
2662 * Used spin_lock to synchronize reading of stats structures, which
2663 * are written by BNA under the same lock.
2664 */
Eric Dumazet250e0612010-09-02 12:45:02 -07002665static struct rtnl_link_stats64 *
2666bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002667{
2668 struct bnad *bnad = netdev_priv(netdev);
2669 unsigned long flags;
2670
2671 spin_lock_irqsave(&bnad->bna_lock, flags);
2672
Eric Dumazet250e0612010-09-02 12:45:02 -07002673 bnad_netdev_qstats_fill(bnad, stats);
2674 bnad_netdev_hwstats_fill(bnad, stats);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002675
2676 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2677
Eric Dumazet250e0612010-09-02 12:45:02 -07002678 return stats;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002679}
2680
2681static void
2682bnad_set_rx_mode(struct net_device *netdev)
2683{
2684 struct bnad *bnad = netdev_priv(netdev);
2685 u32 new_mask, valid_mask;
2686 unsigned long flags;
2687
2688 spin_lock_irqsave(&bnad->bna_lock, flags);
2689
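	/*
	 * valid_mask marks which Rx mode bits are being changed; new_mask
	 * carries their new values.
	 */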
2690 new_mask = valid_mask = 0;
2691
2692 if (netdev->flags & IFF_PROMISC) {
2693 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2694 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2695 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2696 bnad->cfg_flags |= BNAD_CF_PROMISC;
2697 }
2698 } else {
2699 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2700 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2701 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2702 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2703 }
2704 }
2705
2706 if (netdev->flags & IFF_ALLMULTI) {
2707 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2708 new_mask |= BNA_RXMODE_ALLMULTI;
2709 valid_mask |= BNA_RXMODE_ALLMULTI;
2710 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2711 }
2712 } else {
2713 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2714 new_mask &= ~BNA_RXMODE_ALLMULTI;
2715 valid_mask |= BNA_RXMODE_ALLMULTI;
2716 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2717 }
2718 }
2719
2720 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2721
2722 if (!netdev_mc_empty(netdev)) {
2723 u8 *mcaddr_list;
2724 int mc_count = netdev_mc_count(netdev);
2725
2726 /* Index 0 holds the broadcast address */
2727 mcaddr_list =
2728 kzalloc((mc_count + 1) * ETH_ALEN,
2729 GFP_ATOMIC);
2730 if (!mcaddr_list)
Jiri Slabyca1cef32010-09-04 02:08:41 +00002731 goto unlock;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002732
2733 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2734
2735 /* Copy rest of the MC addresses */
2736 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2737
2738 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2739 mcaddr_list, NULL);
2740
2741 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2742 kfree(mcaddr_list);
2743 }
Jiri Slabyca1cef32010-09-04 02:08:41 +00002744unlock:
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002745 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2746}
2747
2748/*
2749 * bna_lock is used to sync writes to netdev->addr
2750 * conf_lock cannot be used since this call may be made
2751 * in a non-blocking context.
2752 */
2753static int
2754bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2755{
2756 int err;
2757 struct bnad *bnad = netdev_priv(netdev);
2758 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2759 unsigned long flags;
2760
2761 spin_lock_irqsave(&bnad->bna_lock, flags);
2762
2763 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2764
2765 if (!err)
2766 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2767
2768 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2769
2770 return err;
2771}
2772
2773static int
2774bnad_change_mtu(struct net_device *netdev, int new_mtu)
2775{
2776 int mtu, err = 0;
2777 unsigned long flags;
2778
2779 struct bnad *bnad = netdev_priv(netdev);
2780
2781 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2782 return -EINVAL;
2783
2784 mutex_lock(&bnad->conf_mutex);
2785
2786 netdev->mtu = new_mtu;
2787
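	/* On-wire frame size = Ethernet header + L3 payload (MTU) + FCS */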
2788 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2789
2790 spin_lock_irqsave(&bnad->bna_lock, flags);
2791 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2792 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2793
2794 mutex_unlock(&bnad->conf_mutex);
2795 return err;
2796}
2797
2798static void
2799bnad_vlan_rx_register(struct net_device *netdev,
2800 struct vlan_group *vlan_grp)
2801{
2802 struct bnad *bnad = netdev_priv(netdev);
2803
2804 mutex_lock(&bnad->conf_mutex);
2805 bnad->vlan_grp = vlan_grp;
2806 mutex_unlock(&bnad->conf_mutex);
2807}
2808
2809static void
2810bnad_vlan_rx_add_vid(struct net_device *netdev,
2811 unsigned short vid)
2812{
2813 struct bnad *bnad = netdev_priv(netdev);
2814 unsigned long flags;
2815
2816 if (!bnad->rx_info[0].rx)
2817 return;
2818
2819 mutex_lock(&bnad->conf_mutex);
2820
2821 spin_lock_irqsave(&bnad->bna_lock, flags);
2822 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2823 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2824
2825 mutex_unlock(&bnad->conf_mutex);
2826}
2827
2828static void
2829bnad_vlan_rx_kill_vid(struct net_device *netdev,
2830 unsigned short vid)
2831{
2832 struct bnad *bnad = netdev_priv(netdev);
2833 unsigned long flags;
2834
2835 if (!bnad->rx_info[0].rx)
2836 return;
2837
2838 mutex_lock(&bnad->conf_mutex);
2839
2840 spin_lock_irqsave(&bnad->bna_lock, flags);
2841 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2842 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2843
2844 mutex_unlock(&bnad->conf_mutex);
2845}
2846
2847#ifdef CONFIG_NET_POLL_CONTROLLER
2848static void
2849bnad_netpoll(struct net_device *netdev)
2850{
2851 struct bnad *bnad = netdev_priv(netdev);
2852 struct bnad_rx_info *rx_info;
2853 struct bnad_rx_ctrl *rx_ctrl;
2854 u32 curr_mask;
2855 int i, j;
2856
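	/*
	 * INTx mode: mask device interrupts and run the ISR directly.
	 * MSI-X mode: disable Rx interrupts and schedule the NAPI poll
	 * on every Rx path.
	 */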
2857 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2858 bna_intx_disable(&bnad->bna, curr_mask);
2859 bnad_isr(bnad->pcidev->irq, netdev);
2860 bna_intx_enable(&bnad->bna, curr_mask);
2861 } else {
2862 for (i = 0; i < bnad->num_rx; i++) {
2863 rx_info = &bnad->rx_info[i];
2864 if (!rx_info->rx)
2865 continue;
2866 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2867 rx_ctrl = &rx_info->rx_ctrl[j];
2868 if (rx_ctrl->ccb) {
2869 bnad_disable_rx_irq(bnad,
2870 rx_ctrl->ccb);
2871 bnad_netif_rx_schedule_poll(bnad,
2872 rx_ctrl->ccb);
2873 }
2874 }
2875 }
2876 }
2877}
2878#endif
2879
2880static const struct net_device_ops bnad_netdev_ops = {
2881 .ndo_open = bnad_open,
2882 .ndo_stop = bnad_stop,
2883 .ndo_start_xmit = bnad_start_xmit,
Eric Dumazet250e0612010-09-02 12:45:02 -07002884 .ndo_get_stats64 = bnad_get_stats64,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002885 .ndo_set_rx_mode = bnad_set_rx_mode,
2886 .ndo_set_multicast_list = bnad_set_rx_mode,
2887 .ndo_validate_addr = eth_validate_addr,
2888 .ndo_set_mac_address = bnad_set_mac_address,
2889 .ndo_change_mtu = bnad_change_mtu,
2890 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2891 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2892 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2893#ifdef CONFIG_NET_POLL_CONTROLLER
2894 .ndo_poll_controller = bnad_netpoll
2895#endif
2896};
2897
2898static void
2899bnad_netdev_init(struct bnad *bnad, bool using_dac)
2900{
2901 struct net_device *netdev = bnad->netdev;
2902
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00002903 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2904 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2905 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002906
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00002907 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2908 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2909 NETIF_F_TSO | NETIF_F_TSO6;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002910
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00002911 netdev->features |= netdev->hw_features |
2912 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002913
2914 if (using_dac)
2915 netdev->features |= NETIF_F_HIGHDMA;
2916
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002917 netdev->mem_start = bnad->mmio_start;
2918 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2919
2920 netdev->netdev_ops = &bnad_netdev_ops;
2921 bnad_set_ethtool_ops(netdev);
2922}
2923
2924/*
2925 * 1. Initialize the bnad structure
2926 * 2. Setup netdev pointer in pci_dev
2927 * 3. Initialize Tx free tasklet
2928 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2929 */
2930static int
2931bnad_init(struct bnad *bnad,
2932 struct pci_dev *pdev, struct net_device *netdev)
2933{
2934 unsigned long flags;
2935
2936 SET_NETDEV_DEV(netdev, &pdev->dev);
2937 pci_set_drvdata(pdev, netdev);
2938
2939 bnad->netdev = netdev;
2940 bnad->pcidev = pdev;
2941 bnad->mmio_start = pci_resource_start(pdev, 0);
2942 bnad->mmio_len = pci_resource_len(pdev, 0);
2943 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2944 if (!bnad->bar0) {
2945 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2946 pci_set_drvdata(pdev, NULL);
2947 return -ENOMEM;
2948 }
2949 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2950 (unsigned long long) bnad->mmio_len);
2951
2952 spin_lock_irqsave(&bnad->bna_lock, flags);
2953 if (!bnad_msix_disable)
2954 bnad->cfg_flags = BNAD_CF_MSIX;
2955
2956 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2957
2958 bnad_q_num_init(bnad);
2959 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2960
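	/* One vector per TxQ, one per Rx path, plus the mailbox vector(s) */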
2961 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2962 (bnad->num_rx * bnad->num_rxp_per_rx) +
2963 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002964
2965 bnad->txq_depth = BNAD_TXQ_DEPTH;
2966 bnad->rxq_depth = BNAD_RXQ_DEPTH;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002967
2968 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2969 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2970
2971 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2972 (unsigned long)bnad);
2973
2974 return 0;
2975}
2976
2977/*
2978 * Must be called after bnad_pci_uninit()
2979 * so that iounmap() and pci_set_drvdata(NULL)
2980 * happen only after PCI uninitialization.
2981 */
2982static void
2983bnad_uninit(struct bnad *bnad)
2984{
2985 if (bnad->bar0)
2986 iounmap(bnad->bar0);
2987 pci_set_drvdata(bnad->pcidev, NULL);
2988}
2989
2990/*
2991 * Initialize locks
2992 * a) Per-device mutex used for serializing configuration
2993 *    changes from the OS interface
2994 * b) Spinlock used to protect the bna state machine
2995 */
2996static void
2997bnad_lock_init(struct bnad *bnad)
2998{
2999 spin_lock_init(&bnad->bna_lock);
3000 mutex_init(&bnad->conf_mutex);
3001}
3002
3003static void
3004bnad_lock_uninit(struct bnad *bnad)
3005{
3006 mutex_destroy(&bnad->conf_mutex);
3007}
3008
3009/* PCI Initialization */
3010static int
3011bnad_pci_init(struct bnad *bnad,
3012 struct pci_dev *pdev, bool *using_dac)
3013{
3014 int err;
3015
3016 err = pci_enable_device(pdev);
3017 if (err)
3018 return err;
3019 err = pci_request_regions(pdev, BNAD_NAME);
3020 if (err)
3021 goto disable_device;
Ivan Vecera5ea74312011-02-02 04:37:02 +00003022 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3023 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003024 *using_dac = 1;
3025 } else {
Ivan Vecera5ea74312011-02-02 04:37:02 +00003026 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003027 if (err) {
Ivan Vecera5ea74312011-02-02 04:37:02 +00003028 err = dma_set_coherent_mask(&pdev->dev,
3029 DMA_BIT_MASK(32));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003030 if (err)
3031 goto release_regions;
3032 }
3033 *using_dac = 0;
3034 }
3035 pci_set_master(pdev);
3036 return 0;
3037
3038release_regions:
3039 pci_release_regions(pdev);
3040disable_device:
3041 pci_disable_device(pdev);
3042
3043 return err;
3044}
3045
3046static void
3047bnad_pci_uninit(struct pci_dev *pdev)
3048{
3049 pci_release_regions(pdev);
3050 pci_disable_device(pdev);
3051}
3052
3053static int __devinit
3054bnad_pci_probe(struct pci_dev *pdev,
3055 const struct pci_device_id *pcidev_id)
3056{
Rasesh Modyaad75b62010-12-23 21:45:08 +00003057 bool using_dac = false;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003058 int err;
3059 struct bnad *bnad;
3060 struct bna *bna;
3061 struct net_device *netdev;
3062 struct bfa_pcidev pcidev_info;
3063 unsigned long flags;
3064
3065 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3066 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3067
3068 mutex_lock(&bnad_fwimg_mutex);
3069 if (!cna_get_firmware_buf(pdev)) {
3070 mutex_unlock(&bnad_fwimg_mutex);
3071 pr_warn("Failed to load Firmware Image!\n");
3072 return -ENODEV;
3073 }
3074 mutex_unlock(&bnad_fwimg_mutex);
3075
3076 /*
3077 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3078 * bnad = netdev_priv(netdev)
3079 */
3080 netdev = alloc_etherdev(sizeof(struct bnad));
3081 if (!netdev) {
3082 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3083 err = -ENOMEM;
3084 return err;
3085 }
3086 bnad = netdev_priv(netdev);
3087
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003088 /*
3089 * PCI initialization
3090 * Output : using_dac = 1 for 64 bit DMA
Rasesh Modybe7fa322010-12-23 21:45:01 +00003091 * = 0 for 32 bit DMA
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003092 */
3093 err = bnad_pci_init(bnad, pdev, &using_dac);
3094 if (err)
3095 goto free_netdev;
3096
3097 bnad_lock_init(bnad);
3098 /*
3099 * Initialize bnad structure
3100 * Setup relation between pci_dev & netdev
3101 * Init Tx free tasklet
3102 */
3103 err = bnad_init(bnad, pdev, netdev);
3104 if (err)
3105 goto pci_uninit;
3106 /* Initialize netdev structure, set up ethtool ops */
3107 bnad_netdev_init(bnad, using_dac);
3108
Rasesh Mody815f41e2010-12-23 21:45:03 +00003109 /* Set link to down state */
3110 netif_carrier_off(netdev);
3111
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003112 bnad_enable_msix(bnad);
3113
3114 /* Get resource requirement form bna */
3115 bna_res_req(&bnad->res_info[0]);
3116
3117 /* Allocate resources from bna */
3118 err = bnad_res_alloc(bnad);
3119 if (err)
3120 goto free_netdev;
3121
3122 bna = &bnad->bna;
3123
3124 /* Setup pcidev_info for bna_init() */
3125 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3126 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3127 pcidev_info.device_id = bnad->pcidev->device;
3128 pcidev_info.pci_bar_kva = bnad->bar0;
3129
3130 mutex_lock(&bnad->conf_mutex);
3131
3132 spin_lock_irqsave(&bnad->bna_lock, flags);
3133 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003134 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3135
3136 bnad->stats.bna_stats = &bna->stats;
3137
3138 /* Set up timers */
3139 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3140 ((unsigned long)bnad));
3141 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3142 ((unsigned long)bnad));
Rasesh Mody1d32f762010-12-23 21:45:09 +00003143 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3144 ((unsigned long)bnad));
3145 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003146 ((unsigned long)bnad));
3147
3148 /* Now start the timer before calling IOC */
Rasesh Mody1d32f762010-12-23 21:45:09 +00003149 mod_timer(&bnad->bna.device.ioc.iocpf_timer,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003150 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3151
3152 /*
3153 * Start the chip
3154 * Don't care even if err != 0, bna state machine will
3155 * deal with it
3156 */
3157 err = bnad_device_enable(bnad);
3158
3159 /* Get the burnt-in mac */
3160 spin_lock_irqsave(&bnad->bna_lock, flags);
3161 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3162 bnad_set_netdev_perm_addr(bnad);
3163 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3164
3165 mutex_unlock(&bnad->conf_mutex);
3166
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003167	/* Finally, register with net_device layer */
3168 err = register_netdev(netdev);
3169 if (err) {
3170 pr_err("BNA : Registering with netdev failed\n");
3171 goto disable_device;
3172 }
3173
3174 return 0;
3175
3176disable_device:
3177 mutex_lock(&bnad->conf_mutex);
3178 bnad_device_disable(bnad);
3179 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3180 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3181 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3182 spin_lock_irqsave(&bnad->bna_lock, flags);
3183 bna_uninit(bna);
3184 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3185 mutex_unlock(&bnad->conf_mutex);
3186
3187 bnad_res_free(bnad);
3188 bnad_disable_msix(bnad);
3189pci_uninit:
3190 bnad_pci_uninit(pdev);
3191 bnad_lock_uninit(bnad);
3192 bnad_uninit(bnad);
3193free_netdev:
3194 free_netdev(netdev);
3195 return err;
3196}
3197
3198static void __devexit
3199bnad_pci_remove(struct pci_dev *pdev)
3200{
3201 struct net_device *netdev = pci_get_drvdata(pdev);
3202 struct bnad *bnad;
3203 struct bna *bna;
3204 unsigned long flags;
3205
3206 if (!netdev)
3207 return;
3208
3209 pr_info("%s bnad_pci_remove\n", netdev->name);
3210 bnad = netdev_priv(netdev);
3211 bna = &bnad->bna;
3212
3213 unregister_netdev(netdev);
3214
3215 mutex_lock(&bnad->conf_mutex);
3216 bnad_device_disable(bnad);
3217 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3218 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3219 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3220 spin_lock_irqsave(&bnad->bna_lock, flags);
3221 bna_uninit(bna);
3222 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3223 mutex_unlock(&bnad->conf_mutex);
3224
3225 bnad_res_free(bnad);
3226 bnad_disable_msix(bnad);
3227 bnad_pci_uninit(pdev);
3228 bnad_lock_uninit(bnad);
3229 bnad_uninit(bnad);
3230 free_netdev(netdev);
3231}
3232
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00003233static const struct pci_device_id bnad_pci_id_table[] = {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003234 {
3235 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3236 PCI_DEVICE_ID_BROCADE_CT),
3237 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3238 .class_mask = 0xffff00
3239 }, {0, }
3240};
3241
3242MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3243
3244static struct pci_driver bnad_pci_driver = {
3245 .name = BNAD_NAME,
3246 .id_table = bnad_pci_id_table,
3247 .probe = bnad_pci_probe,
3248 .remove = __devexit_p(bnad_pci_remove),
3249};
3250
3251static int __init
3252bnad_module_init(void)
3253{
3254 int err;
3255
3256 pr_info("Brocade 10G Ethernet driver\n");
3257
Rasesh Mody8a891422010-08-25 23:00:27 -07003258 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003259
3260 err = pci_register_driver(&bnad_pci_driver);
3261 if (err < 0) {
3262 pr_err("bna : PCI registration failed in module init "
3263 "(%d)\n", err);
3264 return err;
3265 }
3266
3267 return 0;
3268}
3269
3270static void __exit
3271bnad_module_exit(void)
3272{
3273 pci_unregister_driver(&bnad_pci_driver);
3274
3275 if (bfi_fw)
3276 release_firmware(bfi_fw);
3277}
3278
3279module_init(bnad_module_init);
3280module_exit(bnad_module_exit);
3281
3282MODULE_AUTHOR("Brocade");
3283MODULE_LICENSE("GPL");
3284MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3285MODULE_VERSION(BNAD_VERSION);
3286MODULE_FIRMWARE(CNA_FW_FILE_CT);