/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
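
/*
 * Example usage (assuming the module is loaded under the name "bna"):
 *   modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 * falls back to INTx interrupts and turns off IOC auto recovery.
 */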

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)
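
/*
 * Note: the unmap queue length above is sized with the classic "struct hack";
 * presumably struct bnad_unmap_q (declared in bnad.h) already contains a
 * one-element unmap_array[], so only (_depth - 1) additional bnad_skb_unmap
 * entries need to be added to the base structure size.
 */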

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}
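
/*
 * The CQ appears to be walked page by page here: wi_range seems to be the
 * number of entries left on the current software queue page, so when it
 * drops to zero BNA_CQ_QPGE_PTR_GET() is re-issued to hop to the next page.
 * (This is an interpretation of the queue-page macros defined in the bna
 * headers, not something stated in this file.)
 */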

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		     struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 * b) Sending context
 * c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u16 wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int i;

	/*
	 * Just return if TX is stopped. This check is useful when
	 * bnad_free_txbufs() runs from a tasklet that was scheduled before
	 * bnad_cb_tx_cleanup() cleared the BNAD_TXQ_TX_STARTED bit, but
	 * actually executes only after that cleanup has run.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
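
/*
 * bnad_free_txbufs() only reclaims descriptors up to the hardware consumer
 * index snapshot taken above and takes no lock itself; callers serialize
 * against each other with the BNAD_TXQ_FREE_SENT bit (see bnad_tx() and the
 * free tasklet below).
 */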

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
					BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
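
/*
 * In both reclaim paths above, BNAD_TXQ_FREE_SENT acts as a lightweight
 * per-TCB lock: test_and_set_bit() lets only one context (MSIX handler,
 * tasklet or sending context) free completed buffers at a time, and the
 * netdev queue is only woken once at least BNAD_NETIF_WAKE_THRESHOLD
 * descriptors are free again.
 */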

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
				GFP_ATOMIC);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		skb->dev = bnad->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
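
/*
 * The BNAD_RXQ_REFILL bit above keeps NAPI poll and the Rx post callback from
 * refilling the same RCB concurrently, and the shift test means buffers are
 * only replenished once the number of empty slots reaches at least
 * 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT, so allocations are batched.
 */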

static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
		return 0;

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (qid0 == cmpl->rxq_id)
			rcb = ccb->rcb[0];
		else
			rcb = ccb->rcb[1];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
					    next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
						 ntohs(cmpl->vlan_tag), skb);
			else
				vlan_hwaccel_receive_skb(skb,
							 bnad->vlan_grp,
							 ntohs(cmpl->vlan_tag));

		} else { /* Not VLAN tagged/stripped */
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&rx_ctrl->napi, skb);
			else
				netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(ccb)) {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, packets);
		bnad_refill_rxq(bnad, ccb->rcb[0]);
		if (ccb->rcb[1])
			bnad_refill_rxq(bnad, ccb->rcb[1]);
	} else {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, 0);
	}

	return packets;
}

static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		return;

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule(napi);
	}
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status))
		return IRQ_NONE;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

void
bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

static void
bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.port_comp);

	netif_carrier_off(bnad->netdev);
}

void
bnad_cb_port_link_status(struct bnad *bnad,
			 enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
		BNAD_UPDATE_CTR(bnad, cee_up);
	} else
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
			if (!tcb)
				return;
			pr_warn("bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				/* Force an immediate Transmit Schedule */
				pr_info("bna: %s TX_STARTED\n",
					bnad->netdev->name);
				netif_wake_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			} else {
				netif_stop_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			pr_warn("bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
		    enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	if (tx_info != &bnad->tx_info[0])
		return;

	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
	netif_stop_queue(bnad->netdev);
	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return;

	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	/*
	 * Workaround for first device enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}

	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

	if (netif_carrier_ok(bnad->netdev)) {
		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
		netif_wake_queue(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	/* Delay only once for the whole Tx Path Shutdown */
	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad,
		   struct bna_ccb *ccb)
{
	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

	if (ccb->rcb[1])
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}
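
/*
 * The mdelay(BNAD_TXRX_SYNC_MDELAY) in the two cleanup callbacks above is a
 * crude synchronization point: it gives datapath handlers that may have raced
 * past the *_STARTED checks time to drain before the queues are torn down,
 * while the RF_*_SHUTDOWN_DELAYED flags make sure the delay is paid only once
 * per Tx/Rx path shutdown.
 */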

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);

	if (rcb == rcb->cq->ccb->rcb[0])
		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);

	bnad_free_all_rxbufs(bnad, rcb);

	set_bit(BNAD_RXQ_STARTED, &rcb->flags);

	/* Now allocate & post buffers for this RCB */
	/* !!Allocation in callback context */
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
		    enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
		     enum bna_cb_status status)
{
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						 dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}
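
/*
 * mem_type drives the allocator above: BNA_MEM_T_DMA descriptors get
 * DMA-coherent memory the hardware can address directly, while everything
 * else (for example the KVA unmap queues sized by BNAD_FILL_UNMAPQ_MEM_REQ)
 * comes from plain kzalloc().
 */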

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad,
		   struct bna_intr_info *intr_info)
{
	int irq;
	unsigned long flags;

	if (intr_info->idl == NULL)
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);

	kfree(intr_info->idl);
}

/*
 * Allocates IRQ for Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad,
		    struct bna_intr_info *intr_info)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	/* Mbox should use only 1 vector */

	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
	if (!intr_info->idl)
		return -ENOMEM;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
		irq_flags = 0;
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl[0].vector = bnad->msix_num - 1;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
		intr_info->intr_type = BNA_INTR_T_INTX;
		/* intr_info->idl.vector = 0 ? */
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute.
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
	}

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    uint txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
			break;
		}
	}
	return 0;
}
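
/*
 * For INTx (non-MSIX) operation the "vector" written above is not a real
 * interrupt vector but a bit mask (0x1 for the Tx IB, 0x2 for the Rx IB)
 * that the bna layer appears to use to demultiplex the single shared line.
 */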

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			uint tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			uint rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
		       ETH_ALEN);
		i++;
	}
}

static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bna_ccb *ccb;
	struct bnad *bnad;
	int rcvd = 0;

	ccb = rx_ctrl->ccb;

	bnad = ccb->bnad;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, ccb, budget);
	if (rcvd == budget)
		return rcvd;

poll_exit:
	napi_complete((napi));

	BNAD_UPDATE_CTR(bnad, netif_rx_complete);

	bnad_enable_rx_irq(bnad, ccb);
	return rcvd;
}
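
/*
 * Standard NAPI contract: when the full budget was consumed the handler
 * returns without calling napi_complete(), so the kernel keeps polling;
 * otherwise polling is completed and the Rx interrupt is re-enabled above.
 */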

static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, 64);

		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}

/* Should be called with conf_lock held */
void
bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, uint tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	struct bna_tx *tx;
	unsigned long flags;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx)
		goto err_return;
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_RSS_T_V4_TCP |
				 BFI_RSS_T_V6_TCP |
				 BFI_RSS_T_V4_IP |
				 BFI_RSS_T_V6_IP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int dim_timer_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		dim_timer_del = bnad_dim_timer_running(bnad);
		if (dim_timer_del)
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (dim_timer_del)
			del_timer_sync(&bnad->dim_timer);
	}

	bnad_napi_disable(bnad, rx_id);

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	rx_info->rx = NULL;

	bnad_rx_res_free(bnad, res_info);
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
int
bnad_setup_rx(struct bnad *bnad, uint rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_rx_event_cbfn rx_cbfn;
	struct bna_rx *rx;
	unsigned long flags;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Initialize the Rx event handlers */
	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
	rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
			rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!rx)
		goto err_return;
	rx_info->rx = rx;

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	/* Enable NAPI */
	bnad_napi_enable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
1884 if (0 == rx_id) {
1885 /* Set up Dynamic Interrupt Moderation Vector */
1886 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1887 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1888
1889 /* Enable VLAN filtering only on the default Rx */
1890 bna_rx_vlanfilter_enable(rx);
1891
1892 /* Start the DIM timer */
1893 bnad_dim_timer_start(bnad);
1894 }
1895
1896 bna_rx_enable(rx);
1897 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1898
1899 return 0;
1900
1901err_return:
1902 bnad_cleanup_rx(bnad, rx_id);
1903 return err;
1904}
1905
1906/* Called with conf_lock & bnad->bna_lock held */
1907void
1908bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1909{
1910 struct bnad_tx_info *tx_info;
1911
1912 tx_info = &bnad->tx_info[0];
1913 if (!tx_info->tx)
1914 return;
1915
1916 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1917}
1918
1919/* Called with conf_lock & bnad->bna_lock held */
1920void
1921bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1922{
1923 struct bnad_rx_info *rx_info;
1924 int i;
1925
1926 for (i = 0; i < bnad->num_rx; i++) {
1927 rx_info = &bnad->rx_info[i];
1928 if (!rx_info->rx)
1929 continue;
1930 bna_rx_coalescing_timeo_set(rx_info->rx,
1931 bnad->rx_coalescing_timeo);
1932 }
1933}
1934
1935/*
1936 * Called with bnad->bna_lock held
1937 */
1938static int
1939bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1940{
1941 int ret;
1942
1943 if (!is_valid_ether_addr(mac_addr))
1944 return -EADDRNOTAVAIL;
1945
1946 /* If datapath is down, pretend everything went through */
1947 if (!bnad->rx_info[0].rx)
1948 return 0;
1949
1950 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1951 if (ret != BNA_CB_SUCCESS)
1952 return -EADDRNOTAVAIL;
1953
1954 return 0;
1955}
1956
1957/* Should be called with conf_lock held */
1958static int
1959bnad_enable_default_bcast(struct bnad *bnad)
1960{
1961 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1962 int ret;
1963 unsigned long flags;
1964
1965 init_completion(&bnad->bnad_completions.mcast_comp);
1966
1967 spin_lock_irqsave(&bnad->bna_lock, flags);
1968 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1969 bnad_cb_rx_mcast_add);
1970 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1971
1972 if (ret == BNA_CB_SUCCESS)
1973 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1974 else
1975 return -ENODEV;
1976
1977 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1978 return -ENODEV;
1979
1980 return 0;
1981}
1982
Rasesh Modyaad75b62010-12-23 21:45:08 +00001983/* Called with bnad_conf_lock() held */
1984static void
1985bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1986{
1987 u16 vlan_id;
1988 unsigned long flags;
1989
1990 if (!bnad->vlan_grp)
1991 return;
1992
1993 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1994
1995 for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
1996 if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
1997 continue;
1998 spin_lock_irqsave(&bnad->bna_lock, flags);
1999 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
2000 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2001 }
2002}
2003
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002004/* Statistics utilities */
2005void
Eric Dumazet250e0612010-09-02 12:45:02 -07002006bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002007{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002008 int i, j;
2009
2010 for (i = 0; i < bnad->num_rx; i++) {
2011 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2012 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002013 stats->rx_packets += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002014 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002015 stats->rx_bytes += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002016 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2017 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2018 bnad->rx_info[i].rx_ctrl[j].ccb->
2019 rcb[1]->rxq) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002020 stats->rx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002021 bnad->rx_info[i].rx_ctrl[j].
2022 ccb->rcb[1]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002023 stats->rx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002024 bnad->rx_info[i].rx_ctrl[j].
2025 ccb->rcb[1]->rxq->rx_bytes;
2026 }
2027 }
2028 }
2029 }
2030 for (i = 0; i < bnad->num_tx; i++) {
2031 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2032 if (bnad->tx_info[i].tcb[j]) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002033 stats->tx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002034 bnad->tx_info[i].tcb[j]->txq->tx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002035 stats->tx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002036 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2037 }
2038 }
2039 }
2040}
2041
2042/*
2043 * Must be called with the bna_lock held.
2044 */
2045void
Eric Dumazet250e0612010-09-02 12:45:02 -07002046bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002047{
2048 struct bfi_ll_stats_mac *mac_stats;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002049 u64 bmap;
2050 int i;
2051
2052 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
Eric Dumazet250e0612010-09-02 12:45:02 -07002053 stats->rx_errors =
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002054 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2055 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2056 mac_stats->rx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002057 stats->tx_errors = mac_stats->tx_fcs_error +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002058 mac_stats->tx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002059 stats->rx_dropped = mac_stats->rx_drop;
2060 stats->tx_dropped = mac_stats->tx_drop;
2061 stats->multicast = mac_stats->rx_multicast;
2062 stats->collisions = mac_stats->tx_total_collision;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002063
Eric Dumazet250e0612010-09-02 12:45:02 -07002064 stats->rx_length_errors = mac_stats->rx_frame_length_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002065
2066 /* receive ring buffer overflow ?? */
2067
Eric Dumazet250e0612010-09-02 12:45:02 -07002068 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2069 stats->rx_frame_errors = mac_stats->rx_alignment_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002070 /* recv'r fifo overrun */
2071 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2072 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2073 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2074 if (bmap & 1) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002075 stats->rx_fifo_errors +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002076 bnad->stats.bna_stats->
2077 hw_stats->rxf_stats[i].frame_drops;
2078 break;
2079 }
2080 bmap >>= 1;
2081 }
2082}
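/*
 * Descriptive note on the loop above: rxf_bmap[] is reassembled into a
 * 64-bit bitmap of active Rx functions and walked from the least
 * significant bit; frame_drops of the first active RxF found is added
 * to rx_fifo_errors and the walk stops there (note the break).
 */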
2083
2084static void
2085bnad_mbox_irq_sync(struct bnad *bnad)
2086{
2087 u32 irq;
2088 unsigned long flags;
2089
2090 spin_lock_irqsave(&bnad->bna_lock, flags);
2091 if (bnad->cfg_flags & BNAD_CF_MSIX)
2092 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2093 else
2094 irq = bnad->pcidev->irq;
2095 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2096
2097 synchronize_irq(irq);
2098}
2099
2100/* Utility used by bnad_start_xmit, for doing TSO */
2101static int
2102bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2103{
2104 int err;
2105
2106 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
2107 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2108 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2109 if (skb_header_cloned(skb)) {
2110 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2111 if (err) {
2112 BNAD_UPDATE_CTR(bnad, tso_err);
2113 return err;
2114 }
2115 }
2116
2117 /*
2118 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2119 * excluding the length field.
2120 */
2121 if (skb->protocol == htons(ETH_P_IP)) {
2122 struct iphdr *iph = ip_hdr(skb);
2123
2124 /* Do we really need these? */
2125 iph->tot_len = 0;
2126 iph->check = 0;
2127
2128 tcp_hdr(skb)->check =
2129 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2130 IPPROTO_TCP, 0);
2131 BNAD_UPDATE_CTR(bnad, tso4);
2132 } else {
2133 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2134
2135 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2136 ipv6h->payload_len = 0;
2137 tcp_hdr(skb)->check =
2138 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2139 IPPROTO_TCP, 0);
2140 BNAD_UPDATE_CTR(bnad, tso6);
2141 }
2142
2143 return 0;
2144}
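/*
 * Background on the seeding above (standard LSO behaviour, not specific
 * to this adapter): the device re-computes the TCP checksum for every
 * segment it emits, so the stack supplies only the pseudo-header sum
 * over the invariant fields (addresses, protocol) with a zero length.
 * Conceptually the hardware then completes, per segment:
 *
 *	check = fold(seed + segment_length + payload_sum)
 *
 * (illustrative formula; the exact folding is done by the adapter.)
 */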
2145
2146/*
2147 * Initialize Q numbers depending on Rx Paths
2148 * Called with bnad->bna_lock held, because of cfg_flags
2149 * access.
2150 */
2151static void
2152bnad_q_num_init(struct bnad *bnad)
2153{
2154 int rxps;
2155
2156 rxps = min((uint)num_online_cpus(),
2157 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2158
2159 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2160 rxps = 1; /* INTx */
2161
2162 bnad->num_rx = 1;
2163 bnad->num_tx = 1;
2164 bnad->num_rxp_per_rx = rxps;
2165 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2166}
2167
2168/*
2169 * Adjusts the Q numbers, given a number of msix vectors
2170 * Give preference to RSS over Tx priority queues; in that case,
2171 * just use 1 Tx Q.
2172 * Called with bnad->bna_lock held because of cfg_flags access
2173 */
2174static void
2175bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2176{
2177 bnad->num_txq_per_tx = 1;
2178 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2179 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2180 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2181 bnad->num_rxp_per_rx = msix_vectors -
2182 (bnad->num_tx * bnad->num_txq_per_tx) -
2183 BNAD_MAILBOX_MSIX_VECTORS;
2184 } else
2185 bnad->num_rxp_per_rx = 1;
2186}
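/*
 * Worked example of the vector budget (hypothetical numbers, and
 * assuming BNAD_MAILBOX_MSIX_VECTORS == 1): if pci_enable_msix() can
 * only grant 10 vectors with one Tx object of one TxQ, the Rx side is
 * left with 10 - (1 * 1) - 1 = 8 Rx paths; anything below the minimum
 * of the TxQ count + bnad_rxqs_per_cq + mailbox drops back to a single
 * Rx path.
 */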
2187
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002188/* Enable / disable device */
2189static void
2190bnad_device_disable(struct bnad *bnad)
2191{
2192 unsigned long flags;
2193
2194 init_completion(&bnad->bnad_completions.ioc_comp);
2195
2196 spin_lock_irqsave(&bnad->bna_lock, flags);
2197 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2198 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2199
2200 wait_for_completion(&bnad->bnad_completions.ioc_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002201}
2202
2203static int
2204bnad_device_enable(struct bnad *bnad)
2205{
2206 int err = 0;
2207 unsigned long flags;
2208
2209 init_completion(&bnad->bnad_completions.ioc_comp);
2210
2211 spin_lock_irqsave(&bnad->bna_lock, flags);
2212 bna_device_enable(&bnad->bna.device);
2213 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2214
2215 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2216
2217 if (bnad->bnad_completions.ioc_comp_status)
2218 err = bnad->bnad_completions.ioc_comp_status;
2219
2220 return err;
2221}
2222
2223/* Free BNA resources */
2224static void
2225bnad_res_free(struct bnad *bnad)
2226{
2227 int i;
2228 struct bna_res_info *res_info = &bnad->res_info[0];
2229
2230 for (i = 0; i < BNA_RES_T_MAX; i++) {
2231 if (res_info[i].res_type == BNA_RES_T_MEM)
2232 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2233 else
2234 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2235 }
2236}
2237
2238/* Allocates memory and interrupt resources for BNA */
2239static int
2240bnad_res_alloc(struct bnad *bnad)
2241{
2242 int i, err;
2243 struct bna_res_info *res_info = &bnad->res_info[0];
2244
2245 for (i = 0; i < BNA_RES_T_MAX; i++) {
2246 if (res_info[i].res_type == BNA_RES_T_MEM)
2247 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2248 else
2249 err = bnad_mbox_irq_alloc(bnad,
2250 &res_info[i].res_u.intr_info);
2251 if (err)
2252 goto err_return;
2253 }
2254 return 0;
2255
2256err_return:
2257 bnad_res_free(bnad);
2258 return err;
2259}
2260
2261/* Interrupt enable / disable */
2262static void
2263bnad_enable_msix(struct bnad *bnad)
2264{
2265 int i, ret;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002266 unsigned long flags;
2267
2268 spin_lock_irqsave(&bnad->bna_lock, flags);
2269 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2270 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2271 return;
2272 }
2273 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2274
2275 if (bnad->msix_table)
2276 return;
2277
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002278 bnad->msix_table =
Rasesh Modyb7ee31c2010-10-05 15:46:05 +00002279 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002280
2281 if (!bnad->msix_table)
2282 goto intx_mode;
2283
Rasesh Modyb7ee31c2010-10-05 15:46:05 +00002284 for (i = 0; i < bnad->msix_num; i++)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002285 bnad->msix_table[i].entry = i;
2286
Rasesh Modyb7ee31c2010-10-05 15:46:05 +00002287 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002288 if (ret > 0) {
2289 /* Not enough MSI-X vectors. */
2290
2291 spin_lock_irqsave(&bnad->bna_lock, flags);
2292 /* ret = #of vectors that we got */
2293 bnad_q_num_adjust(bnad, ret);
2294 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2295
2296 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2297 + (bnad->num_rx
2298 * bnad->num_rxp_per_rx) +
2299 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002300
2301 /* Try once more with adjusted numbers */
2302 /* If this fails, fall back to INTx */
2303 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
Rasesh Modyb7ee31c2010-10-05 15:46:05 +00002304 bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002305 if (ret)
2306 goto intx_mode;
2307
2308 } else if (ret < 0)
2309 goto intx_mode;
2310 return;
2311
2312intx_mode:
2313
2314 kfree(bnad->msix_table);
2315 bnad->msix_table = NULL;
2316 bnad->msix_num = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002317 spin_lock_irqsave(&bnad->bna_lock, flags);
2318 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2319 bnad_q_num_init(bnad);
2320 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2321}
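/*
 * Descriptive note on the fallback above: a positive return from
 * pci_enable_msix() means only that many vectors are available, so the
 * queue counts are shrunk via bnad_q_num_adjust() and the allocation is
 * retried once with the reduced msix_num.  Any further failure (or a
 * negative return) falls back to INTx, and bnad_q_num_init() re-derives
 * the queue counts for that mode.
 */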
2322
2323static void
2324bnad_disable_msix(struct bnad *bnad)
2325{
2326 u32 cfg_flags;
2327 unsigned long flags;
2328
2329 spin_lock_irqsave(&bnad->bna_lock, flags);
2330 cfg_flags = bnad->cfg_flags;
2331 if (bnad->cfg_flags & BNAD_CF_MSIX)
2332 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2333 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2334
2335 if (cfg_flags & BNAD_CF_MSIX) {
2336 pci_disable_msix(bnad->pcidev);
2337 kfree(bnad->msix_table);
2338 bnad->msix_table = NULL;
2339 }
2340}
2341
2342/* Netdev entry points */
2343static int
2344bnad_open(struct net_device *netdev)
2345{
2346 int err;
2347 struct bnad *bnad = netdev_priv(netdev);
2348 struct bna_pause_config pause_config;
2349 int mtu;
2350 unsigned long flags;
2351
2352 mutex_lock(&bnad->conf_mutex);
2353
2354 /* Tx */
2355 err = bnad_setup_tx(bnad, 0);
2356 if (err)
2357 goto err_return;
2358
2359 /* Rx */
2360 err = bnad_setup_rx(bnad, 0);
2361 if (err)
2362 goto cleanup_tx;
2363
2364 /* Port */
2365 pause_config.tx_pause = 0;
2366 pause_config.rx_pause = 0;
2367
2368 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2369
2370 spin_lock_irqsave(&bnad->bna_lock, flags);
2371 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2372 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2373 bna_port_enable(&bnad->bna.port);
2374 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2375
2376 /* Enable broadcast */
2377 bnad_enable_default_bcast(bnad);
2378
Rasesh Modyaad75b62010-12-23 21:45:08 +00002379 /* Restore VLANs, if any */
2380 bnad_restore_vlans(bnad, 0);
2381
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002382 /* Set the UCAST address */
2383 spin_lock_irqsave(&bnad->bna_lock, flags);
2384 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2385 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2386
2387 /* Start the stats timer */
2388 bnad_stats_timer_start(bnad);
2389
2390 mutex_unlock(&bnad->conf_mutex);
2391
2392 return 0;
2393
2394cleanup_tx:
2395 bnad_cleanup_tx(bnad, 0);
2396
2397err_return:
2398 mutex_unlock(&bnad->conf_mutex);
2399 return err;
2400}
2401
2402static int
2403bnad_stop(struct net_device *netdev)
2404{
2405 struct bnad *bnad = netdev_priv(netdev);
2406 unsigned long flags;
2407
2408 mutex_lock(&bnad->conf_mutex);
2409
2410 /* Stop the stats timer */
2411 bnad_stats_timer_stop(bnad);
2412
2413 init_completion(&bnad->bnad_completions.port_comp);
2414
2415 spin_lock_irqsave(&bnad->bna_lock, flags);
2416 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2417 bnad_cb_port_disabled);
2418 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2419
2420 wait_for_completion(&bnad->bnad_completions.port_comp);
2421
2422 bnad_cleanup_tx(bnad, 0);
2423 bnad_cleanup_rx(bnad, 0);
2424
2425 /* Synchronize mailbox IRQ */
2426 bnad_mbox_irq_sync(bnad);
2427
2428 mutex_unlock(&bnad->conf_mutex);
2429
2430 return 0;
2431}
2432
2433/* TX */
2434/*
2435 * bnad_start_xmit : Netdev entry point for Transmit
2436 * Called with the Tx lock held by the net_device layer
2437 */
2438static netdev_tx_t
2439bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2440{
2441 struct bnad *bnad = netdev_priv(netdev);
2442
2443 u16 txq_prod, vlan_tag = 0;
2444 u32 unmap_prod, wis, wis_used, wi_range;
2445 u32 vectors, vect_id, i, acked;
2446 u32 tx_id;
2447 int err;
2448
2449 struct bnad_tx_info *tx_info;
2450 struct bna_tcb *tcb;
2451 struct bnad_unmap_q *unmap_q;
2452 dma_addr_t dma_addr;
2453 struct bna_txq_entry *txqent;
2454 bna_txq_wi_ctrl_flag_t flags;
2455
2456 if (unlikely
2457 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2458 dev_kfree_skb(skb);
2459 return NETDEV_TX_OK;
2460 }
2461
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002462 tx_id = 0;
2463
2464 tx_info = &bnad->tx_info[tx_id];
2465 tcb = tx_info->tcb[tx_id];
2466 unmap_q = tcb->unmap_q;
2467
Rasesh Modybe7fa322010-12-23 21:45:01 +00002468 /*
2469 * Takes care of the Tx that is scheduled between clearing the flag
2470 * and the netif_stop_queue() call.
2471 */
2472 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2473 dev_kfree_skb(skb);
2474 return NETDEV_TX_OK;
2475 }
2476
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002477 vectors = 1 + skb_shinfo(skb)->nr_frags;
2478 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2479 dev_kfree_skb(skb);
2480 return NETDEV_TX_OK;
2481 }
2482 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2483 acked = 0;
2484 if (unlikely
2485 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2486 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2487 if ((u16) (*tcb->hw_consumer_index) !=
2488 tcb->consumer_index &&
2489 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2490 acked = bnad_free_txbufs(bnad, tcb);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002491 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2492 bna_ib_ack(tcb->i_dbell, acked);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002493 smp_mb__before_clear_bit();
2494 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2495 } else {
2496 netif_stop_queue(netdev);
2497 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2498 }
2499
2500 smp_mb();
2501 /*
2502 * Check again to deal with the race between netif_stop_queue()
2503 * here and netif_wake_queue() in the interrupt handler, which
2504 * does not run inside the netif tx lock.
2505 */
2506 if (likely
2507 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2508 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2509 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2510 return NETDEV_TX_BUSY;
2511 } else {
2512 netif_wake_queue(netdev);
2513 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2514 }
2515 }
2516
2517 unmap_prod = unmap_q->producer_index;
2518 wis_used = 1;
2519 vect_id = 0;
2520 flags = 0;
2521
2522 txq_prod = tcb->producer_index;
2523 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2524 BUG_ON(!(wi_range <= tcb->q_depth));
2525 txqent->hdr.wi.reserved = 0;
2526 txqent->hdr.wi.num_vectors = vectors;
2527 txqent->hdr.wi.opcode =
2528 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2529 BNA_TXQ_WI_SEND));
2530
Jesse Grosseab6d182010-10-20 13:56:03 +00002531 if (vlan_tx_tag_present(skb)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002532 vlan_tag = (u16) vlan_tx_tag_get(skb);
2533 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2534 }
2535 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2536 vlan_tag =
2537 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2538 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2539 }
2540
2541 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2542
2543 if (skb_is_gso(skb)) {
2544 err = bnad_tso_prepare(bnad, skb);
2545 if (err) {
2546 dev_kfree_skb(skb);
2547 return NETDEV_TX_OK;
2548 }
2549 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2550 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2551 txqent->hdr.wi.l4_hdr_size_n_offset =
2552 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2553 (tcp_hdrlen(skb) >> 2,
2554 skb_transport_offset(skb)));
2555 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2556 u8 proto = 0;
2557
2558 txqent->hdr.wi.lso_mss = 0;
2559
2560 if (skb->protocol == htons(ETH_P_IP))
2561 proto = ip_hdr(skb)->protocol;
2562 else if (skb->protocol == htons(ETH_P_IPV6)) {
2563 /* nexthdr may not be TCP immediately. */
2564 proto = ipv6_hdr(skb)->nexthdr;
2565 }
2566 if (proto == IPPROTO_TCP) {
2567 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2568 txqent->hdr.wi.l4_hdr_size_n_offset =
2569 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2570 (0, skb_transport_offset(skb)));
2571
2572 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2573
2574 BUG_ON(!(skb_headlen(skb) >=
2575 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2576
2577 } else if (proto == IPPROTO_UDP) {
2578 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2579 txqent->hdr.wi.l4_hdr_size_n_offset =
2580 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2581 (0, skb_transport_offset(skb)));
2582
2583 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2584
2585 BUG_ON(!(skb_headlen(skb) >=
2586 skb_transport_offset(skb) +
2587 sizeof(struct udphdr)));
2588 } else {
2589 err = skb_checksum_help(skb);
2590 BNAD_UPDATE_CTR(bnad, csum_help);
2591 if (err) {
2592 dev_kfree_skb(skb);
2593 BNAD_UPDATE_CTR(bnad, csum_help_err);
2594 return NETDEV_TX_OK;
2595 }
2596 }
2597 } else {
2598 txqent->hdr.wi.lso_mss = 0;
2599 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2600 }
2601
2602 txqent->hdr.wi.flags = htons(flags);
2603
2604 txqent->hdr.wi.frame_length = htonl(skb->len);
2605
2606 unmap_q->unmap_array[unmap_prod].skb = skb;
2607 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2608 txqent->vector[vect_id].length = htons(skb_headlen(skb));
Ivan Vecera5ea74312011-02-02 04:37:02 +00002609 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2610 skb_headlen(skb), DMA_TO_DEVICE);
2611 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002612 dma_addr);
2613
2614 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2615 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2616
2617 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2618 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2619 u32 size = frag->size;
2620
2621 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2622 vect_id = 0;
2623 if (--wi_range)
2624 txqent++;
2625 else {
2626 BNA_QE_INDX_ADD(txq_prod, wis_used,
2627 tcb->q_depth);
2628 wis_used = 0;
2629 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2630 txqent, wi_range);
2631 BUG_ON(!(wi_range <= tcb->q_depth));
2632 }
2633 wis_used++;
2634 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2635 }
2636
2637 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2638 txqent->vector[vect_id].length = htons(size);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002639 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2640 frag->page_offset, size, DMA_TO_DEVICE);
2641 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002642 dma_addr);
2643 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2644 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2645 }
2646
2647 unmap_q->producer_index = unmap_prod;
2648 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2649 tcb->producer_index = txq_prod;
2650
2651 smp_mb();
Rasesh Modybe7fa322010-12-23 21:45:01 +00002652
2653 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2654 return NETDEV_TX_OK;
2655
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002656 bna_txq_prod_indx_doorbell(tcb);
2657
2658 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2659 tasklet_schedule(&bnad->tx_free_tasklet);
2660
2661 return NETDEV_TX_OK;
2662}
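/*
 * Worked example of the work-item accounting in bnad_start_xmit()
 * (assuming BFI_TX_MAX_VECTORS_PER_WI == 4, as the "4 vectors per work
 * item" note above suggests): an skb with a linear header plus 6 frags
 * needs 7 vectors.  The first (send) WI carries vectors 0-3 and each
 * extension WI carries up to 4 more, so BNA_TXQ_WI_NEEDED(7) == 2 WIs.
 */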
2663
2664/*
2665 * The spin lock is used to synchronize reading of the stats structures,
2666 * which are written by BNA under the same lock.
2667 */
Eric Dumazet250e0612010-09-02 12:45:02 -07002668static struct rtnl_link_stats64 *
2669bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002670{
2671 struct bnad *bnad = netdev_priv(netdev);
2672 unsigned long flags;
2673
2674 spin_lock_irqsave(&bnad->bna_lock, flags);
2675
Eric Dumazet250e0612010-09-02 12:45:02 -07002676 bnad_netdev_qstats_fill(bnad, stats);
2677 bnad_netdev_hwstats_fill(bnad, stats);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002678
2679 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2680
Eric Dumazet250e0612010-09-02 12:45:02 -07002681 return stats;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002682}
2683
2684static void
2685bnad_set_rx_mode(struct net_device *netdev)
2686{
2687 struct bnad *bnad = netdev_priv(netdev);
2688 u32 new_mask, valid_mask;
2689 unsigned long flags;
2690
2691 spin_lock_irqsave(&bnad->bna_lock, flags);
2692
2693 new_mask = valid_mask = 0;
2694
2695 if (netdev->flags & IFF_PROMISC) {
2696 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2697 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2698 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2699 bnad->cfg_flags |= BNAD_CF_PROMISC;
2700 }
2701 } else {
2702 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2703 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2704 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2705 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2706 }
2707 }
2708
2709 if (netdev->flags & IFF_ALLMULTI) {
2710 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2711 new_mask |= BNA_RXMODE_ALLMULTI;
2712 valid_mask |= BNA_RXMODE_ALLMULTI;
2713 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2714 }
2715 } else {
2716 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2717 new_mask &= ~BNA_RXMODE_ALLMULTI;
2718 valid_mask |= BNA_RXMODE_ALLMULTI;
2719 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2720 }
2721 }
2722
2723 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2724
2725 if (!netdev_mc_empty(netdev)) {
2726 u8 *mcaddr_list;
2727 int mc_count = netdev_mc_count(netdev);
2728
2729 /* Index 0 holds the broadcast address */
2730 mcaddr_list =
2731 kzalloc((mc_count + 1) * ETH_ALEN,
2732 GFP_ATOMIC);
2733 if (!mcaddr_list)
Jiri Slabyca1cef32010-09-04 02:08:41 +00002734 goto unlock;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002735
2736 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2737
2738 /* Copy rest of the MC addresses */
2739 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2740
2741 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2742 mcaddr_list, NULL);
2743
2744 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2745 kfree(mcaddr_list);
2746 }
Jiri Slabyca1cef32010-09-04 02:08:41 +00002747unlock:
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002748 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2749}
2750
2751/*
2752 * bna_lock is used to sync writes to netdev->dev_addr;
2753 * conf_lock cannot be used since this call may be made
2754 * in a non-blocking context.
2755 */
2756static int
2757bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2758{
2759 int err;
2760 struct bnad *bnad = netdev_priv(netdev);
2761 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2762 unsigned long flags;
2763
2764 spin_lock_irqsave(&bnad->bna_lock, flags);
2765
2766 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2767
2768 if (!err)
2769 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2770
2771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2772
2773 return err;
2774}
2775
2776static int
2777bnad_change_mtu(struct net_device *netdev, int new_mtu)
2778{
2779 int mtu, err = 0;
2780 unsigned long flags;
2781
2782 struct bnad *bnad = netdev_priv(netdev);
2783
2784 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2785 return -EINVAL;
2786
2787 mutex_lock(&bnad->conf_mutex);
2788
2789 netdev->mtu = new_mtu;
2790
2791 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2792
2793 spin_lock_irqsave(&bnad->bna_lock, flags);
2794 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2795 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2796
2797 mutex_unlock(&bnad->conf_mutex);
2798 return err;
2799}
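/*
 * Example of the frame size handed to bna_port_mtu_set() above: for the
 * default MTU of 1500 this works out to ETH_HLEN (14) + 1500 +
 * ETH_FCS_LEN (4) = 1518 bytes on the wire.
 */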
2800
2801static void
2802bnad_vlan_rx_register(struct net_device *netdev,
2803 struct vlan_group *vlan_grp)
2804{
2805 struct bnad *bnad = netdev_priv(netdev);
2806
2807 mutex_lock(&bnad->conf_mutex);
2808 bnad->vlan_grp = vlan_grp;
2809 mutex_unlock(&bnad->conf_mutex);
2810}
2811
2812static void
2813bnad_vlan_rx_add_vid(struct net_device *netdev,
2814 unsigned short vid)
2815{
2816 struct bnad *bnad = netdev_priv(netdev);
2817 unsigned long flags;
2818
2819 if (!bnad->rx_info[0].rx)
2820 return;
2821
2822 mutex_lock(&bnad->conf_mutex);
2823
2824 spin_lock_irqsave(&bnad->bna_lock, flags);
2825 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2826 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2827
2828 mutex_unlock(&bnad->conf_mutex);
2829}
2830
2831static void
2832bnad_vlan_rx_kill_vid(struct net_device *netdev,
2833 unsigned short vid)
2834{
2835 struct bnad *bnad = netdev_priv(netdev);
2836 unsigned long flags;
2837
2838 if (!bnad->rx_info[0].rx)
2839 return;
2840
2841 mutex_lock(&bnad->conf_mutex);
2842
2843 spin_lock_irqsave(&bnad->bna_lock, flags);
2844 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2845 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2846
2847 mutex_unlock(&bnad->conf_mutex);
2848}
2849
2850#ifdef CONFIG_NET_POLL_CONTROLLER
2851static void
2852bnad_netpoll(struct net_device *netdev)
2853{
2854 struct bnad *bnad = netdev_priv(netdev);
2855 struct bnad_rx_info *rx_info;
2856 struct bnad_rx_ctrl *rx_ctrl;
2857 u32 curr_mask;
2858 int i, j;
2859
2860 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2861 bna_intx_disable(&bnad->bna, curr_mask);
2862 bnad_isr(bnad->pcidev->irq, netdev);
2863 bna_intx_enable(&bnad->bna, curr_mask);
2864 } else {
2865 for (i = 0; i < bnad->num_rx; i++) {
2866 rx_info = &bnad->rx_info[i];
2867 if (!rx_info->rx)
2868 continue;
2869 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2870 rx_ctrl = &rx_info->rx_ctrl[j];
2871 if (rx_ctrl->ccb) {
2872 bnad_disable_rx_irq(bnad,
2873 rx_ctrl->ccb);
2874 bnad_netif_rx_schedule_poll(bnad,
2875 rx_ctrl->ccb);
2876 }
2877 }
2878 }
2879 }
2880}
2881#endif
2882
2883static const struct net_device_ops bnad_netdev_ops = {
2884 .ndo_open = bnad_open,
2885 .ndo_stop = bnad_stop,
2886 .ndo_start_xmit = bnad_start_xmit,
Eric Dumazet250e0612010-09-02 12:45:02 -07002887 .ndo_get_stats64 = bnad_get_stats64,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002888 .ndo_set_rx_mode = bnad_set_rx_mode,
2889 .ndo_set_multicast_list = bnad_set_rx_mode,
2890 .ndo_validate_addr = eth_validate_addr,
2891 .ndo_set_mac_address = bnad_set_mac_address,
2892 .ndo_change_mtu = bnad_change_mtu,
2893 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2894 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2895 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2896#ifdef CONFIG_NET_POLL_CONTROLLER
2897 .ndo_poll_controller = bnad_netpoll
2898#endif
2899};
2900
2901static void
2902bnad_netdev_init(struct bnad *bnad, bool using_dac)
2903{
2904 struct net_device *netdev = bnad->netdev;
2905
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00002906 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2907 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2908 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002909
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00002910 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2911 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2912 NETIF_F_TSO | NETIF_F_TSO6;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002913
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00002914 netdev->features |= netdev->hw_features |
2915 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002916
2917 if (using_dac)
2918 netdev->features |= NETIF_F_HIGHDMA;
2919
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002920 netdev->mem_start = bnad->mmio_start;
2921 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2922
2923 netdev->netdev_ops = &bnad_netdev_ops;
2924 bnad_set_ethtool_ops(netdev);
2925}
2926
2927/*
2928 * 1. Initialize the bnad structure
2929 * 2. Setup netdev pointer in pci_dev
2930 * 3. Initialize Tx free tasklet
2931 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2932 */
2933static int
2934bnad_init(struct bnad *bnad,
2935 struct pci_dev *pdev, struct net_device *netdev)
2936{
2937 unsigned long flags;
2938
2939 SET_NETDEV_DEV(netdev, &pdev->dev);
2940 pci_set_drvdata(pdev, netdev);
2941
2942 bnad->netdev = netdev;
2943 bnad->pcidev = pdev;
2944 bnad->mmio_start = pci_resource_start(pdev, 0);
2945 bnad->mmio_len = pci_resource_len(pdev, 0);
2946 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2947 if (!bnad->bar0) {
2948 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2949 pci_set_drvdata(pdev, NULL);
2950 return -ENOMEM;
2951 }
2952 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2953 (unsigned long long) bnad->mmio_len);
2954
2955 spin_lock_irqsave(&bnad->bna_lock, flags);
2956 if (!bnad_msix_disable)
2957 bnad->cfg_flags = BNAD_CF_MSIX;
2958
2959 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2960
2961 bnad_q_num_init(bnad);
2962 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2963
2964 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2965 (bnad->num_rx * bnad->num_rxp_per_rx) +
2966 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002967
2968 bnad->txq_depth = BNAD_TXQ_DEPTH;
2969 bnad->rxq_depth = BNAD_RXQ_DEPTH;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002970
2971 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2972 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2973
2974 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2975 (unsigned long)bnad);
2976
2977 return 0;
2978}
2979
2980/*
2981 * Must be called after bnad_pci_uninit()
2982 * so that iounmap() and pci_set_drvdata(NULL)
2983 * happen only after PCI uninitialization.
2984 */
2985static void
2986bnad_uninit(struct bnad *bnad)
2987{
2988 if (bnad->bar0)
2989 iounmap(bnad->bar0);
2990 pci_set_drvdata(bnad->pcidev, NULL);
2991}
2992
2993/*
2994 * Initialize locks
2995 a) Per-device mutex used for serializing configuration
2996 changes from the OS interface
2997 b) Spin lock used to protect the bna state machine
2998 */
2999static void
3000bnad_lock_init(struct bnad *bnad)
3001{
3002 spin_lock_init(&bnad->bna_lock);
3003 mutex_init(&bnad->conf_mutex);
3004}
3005
3006static void
3007bnad_lock_uninit(struct bnad *bnad)
3008{
3009 mutex_destroy(&bnad->conf_mutex);
3010}
3011
3012/* PCI Initialization */
3013static int
3014bnad_pci_init(struct bnad *bnad,
3015 struct pci_dev *pdev, bool *using_dac)
3016{
3017 int err;
3018
3019 err = pci_enable_device(pdev);
3020 if (err)
3021 return err;
3022 err = pci_request_regions(pdev, BNAD_NAME);
3023 if (err)
3024 goto disable_device;
Ivan Vecera5ea74312011-02-02 04:37:02 +00003025 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3026 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003027 *using_dac = 1;
3028 } else {
Ivan Vecera5ea74312011-02-02 04:37:02 +00003029 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003030 if (err) {
Ivan Vecera5ea74312011-02-02 04:37:02 +00003031 err = dma_set_coherent_mask(&pdev->dev,
3032 DMA_BIT_MASK(32));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003033 if (err)
3034 goto release_regions;
3035 }
3036 *using_dac = 0;
3037 }
3038 pci_set_master(pdev);
3039 return 0;
3040
3041release_regions:
3042 pci_release_regions(pdev);
3043disable_device:
3044 pci_disable_device(pdev);
3045
3046 return err;
3047}
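/*
 * Descriptive note: the DMA setup above first tries full 64-bit
 * streaming and coherent masks; if either fails it falls back to 32-bit
 * masks and reports using_dac = 0, in which case bnad_netdev_init()
 * does not advertise NETIF_F_HIGHDMA.
 */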
3048
3049static void
3050bnad_pci_uninit(struct pci_dev *pdev)
3051{
3052 pci_release_regions(pdev);
3053 pci_disable_device(pdev);
3054}
3055
3056static int __devinit
3057bnad_pci_probe(struct pci_dev *pdev,
3058 const struct pci_device_id *pcidev_id)
3059{
Rasesh Modyaad75b62010-12-23 21:45:08 +00003060 bool using_dac = false;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003061 int err;
3062 struct bnad *bnad;
3063 struct bna *bna;
3064 struct net_device *netdev;
3065 struct bfa_pcidev pcidev_info;
3066 unsigned long flags;
3067
3068 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3069 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3070
3071 mutex_lock(&bnad_fwimg_mutex);
3072 if (!cna_get_firmware_buf(pdev)) {
3073 mutex_unlock(&bnad_fwimg_mutex);
3074 pr_warn("Failed to load Firmware Image!\n");
3075 return -ENODEV;
3076 }
3077 mutex_unlock(&bnad_fwimg_mutex);
3078
3079 /*
3080 * Allocate netdev with a private area of sizeof(struct bnad);
3081 * bnad = netdev_priv(netdev)
3082 */
3083 netdev = alloc_etherdev(sizeof(struct bnad));
3084 if (!netdev) {
3085 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3086 err = -ENOMEM;
3087 return err;
3088 }
3089 bnad = netdev_priv(netdev);
3090
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003091 /*
3092 * PCI initialization
3093 * Output : using_dac = 1 for 64 bit DMA
Rasesh Modybe7fa322010-12-23 21:45:01 +00003094 * = 0 for 32 bit DMA
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003095 */
3096 err = bnad_pci_init(bnad, pdev, &using_dac);
3097 if (err)
3098 goto free_netdev;
3099
3100 bnad_lock_init(bnad);
3101 /*
3102 * Initialize bnad structure
3103 * Setup relation between pci_dev & netdev
3104 * Init Tx free tasklet
3105 */
3106 err = bnad_init(bnad, pdev, netdev);
3107 if (err)
3108 goto pci_uninit;
3109 /* Initialize netdev structure, set up ethtool ops */
3110 bnad_netdev_init(bnad, using_dac);
3111
Rasesh Mody815f41e2010-12-23 21:45:03 +00003112 /* Set link to down state */
3113 netif_carrier_off(netdev);
3114
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003115 bnad_enable_msix(bnad);
3116
3117 /* Get resource requirement form bna */
3118 bna_res_req(&bnad->res_info[0]);
3119
3120 /* Allocate resources from bna */
3121 err = bnad_res_alloc(bnad);
3122 if (err)
3123 goto free_netdev;
3124
3125 bna = &bnad->bna;
3126
3127 /* Setup pcidev_info for bna_init() */
3128 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3129 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3130 pcidev_info.device_id = bnad->pcidev->device;
3131 pcidev_info.pci_bar_kva = bnad->bar0;
3132
3133 mutex_lock(&bnad->conf_mutex);
3134
3135 spin_lock_irqsave(&bnad->bna_lock, flags);
3136 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003137 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3138
3139 bnad->stats.bna_stats = &bna->stats;
3140
3141 /* Set up timers */
3142 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3143 ((unsigned long)bnad));
3144 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3145 ((unsigned long)bnad));
Rasesh Mody1d32f762010-12-23 21:45:09 +00003146 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3147 ((unsigned long)bnad));
3148 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003149 ((unsigned long)bnad));
3150
3151 /* Now start the timer before calling IOC */
Rasesh Mody1d32f762010-12-23 21:45:09 +00003152 mod_timer(&bnad->bna.device.ioc.iocpf_timer,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003153 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3154
3155 /*
3156 * Start the chip
3157 * Even if err != 0, the bna state machine will
3158 * deal with it
3159 */
3160 err = bnad_device_enable(bnad);
3161
3162 /* Get the burnt-in mac */
3163 spin_lock_irqsave(&bnad->bna_lock, flags);
3164 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3165 bnad_set_netdev_perm_addr(bnad);
3166 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3167
3168 mutex_unlock(&bnad->conf_mutex);
3169
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003170 /* Finally, register with net_device layer */
3171 err = register_netdev(netdev);
3172 if (err) {
3173 pr_err("BNA : Registering with netdev failed\n");
3174 goto disable_device;
3175 }
3176
3177 return 0;
3178
3179disable_device:
3180 mutex_lock(&bnad->conf_mutex);
3181 bnad_device_disable(bnad);
3182 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3183 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3184 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3185 spin_lock_irqsave(&bnad->bna_lock, flags);
3186 bna_uninit(bna);
3187 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3188 mutex_unlock(&bnad->conf_mutex);
3189
3190 bnad_res_free(bnad);
3191 bnad_disable_msix(bnad);
3192pci_uninit:
3193 bnad_pci_uninit(pdev);
3194 bnad_lock_uninit(bnad);
3195 bnad_uninit(bnad);
3196free_netdev:
3197 free_netdev(netdev);
3198 return err;
3199}
3200
3201static void __devexit
3202bnad_pci_remove(struct pci_dev *pdev)
3203{
3204 struct net_device *netdev = pci_get_drvdata(pdev);
3205 struct bnad *bnad;
3206 struct bna *bna;
3207 unsigned long flags;
3208
3209 if (!netdev)
3210 return;
3211
3212 pr_info("%s bnad_pci_remove\n", netdev->name);
3213 bnad = netdev_priv(netdev);
3214 bna = &bnad->bna;
3215
3216 unregister_netdev(netdev);
3217
3218 mutex_lock(&bnad->conf_mutex);
3219 bnad_device_disable(bnad);
3220 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3221 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3222 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3223 spin_lock_irqsave(&bnad->bna_lock, flags);
3224 bna_uninit(bna);
3225 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3226 mutex_unlock(&bnad->conf_mutex);
3227
3228 bnad_res_free(bnad);
3229 bnad_disable_msix(bnad);
3230 bnad_pci_uninit(pdev);
3231 bnad_lock_uninit(bnad);
3232 bnad_uninit(bnad);
3233 free_netdev(netdev);
3234}
3235
Rasesh Modyb7ee31c2010-10-05 15:46:05 +00003236static const struct pci_device_id bnad_pci_id_table[] = {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003237 {
3238 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3239 PCI_DEVICE_ID_BROCADE_CT),
3240 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3241 .class_mask = 0xffff00
3242 }, {0, }
3243};
3244
3245MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3246
3247static struct pci_driver bnad_pci_driver = {
3248 .name = BNAD_NAME,
3249 .id_table = bnad_pci_id_table,
3250 .probe = bnad_pci_probe,
3251 .remove = __devexit_p(bnad_pci_remove),
3252};
3253
3254static int __init
3255bnad_module_init(void)
3256{
3257 int err;
3258
3259 pr_info("Brocade 10G Ethernet driver\n");
3260
Rasesh Mody8a891422010-08-25 23:00:27 -07003261 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003262
3263 err = pci_register_driver(&bnad_pci_driver);
3264 if (err < 0) {
3265 pr_err("bna : PCI registration failed in module init "
3266 "(%d)\n", err);
3267 return err;
3268 }
3269
3270 return 0;
3271}
3272
3273static void __exit
3274bnad_module_exit(void)
3275{
3276 pci_unregister_driver(&bnad_pci_driver);
3277
3278 if (bfi_fw)
3279 release_firmware(bfi_fw);
3280}
3281
3282module_init(bnad_module_init);
3283module_exit(bnad_module_exit);
3284
3285MODULE_AUTHOR("Brocade");
3286MODULE_LICENSE("GPL");
3287MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3288MODULE_VERSION(BNAD_VERSION);
3289MODULE_FIRMWARE(CNA_FW_FILE_CT);