/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/*
 * Global variables
 */
static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * Local MACROS
 */
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)
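
/*
 * Illustrative expansion of the request macro above (a sketch, not a
 * call site in this file; the array index and sizes are assumed for
 * the example):
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *				 bnad->num_txq_per_tx,
 *				 bnad->txq_depth * sizeof(struct bnad_tx_unmap));
 *
 * This asks bna for one kernel-virtual (BNA_MEM_T_KVA) area per TxQ,
 * each large enough to hold the per-entry unmap bookkeeping for a ring.
 */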

static void
bnad_add_to_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_add_tail(&bnad->list_entry, &bnad_list);
	bnad->id = bna_id++;
	mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_del(&bnad->list_entry);
	mutex_unlock(&bnad_list_mutex);
}

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}

/* Tx Datapath functions */


/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vectors[0], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			       dma_unmap_len(&unmap->vectors[vector], dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}
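
/*
 * Worked example for the unmap walk above (illustrative; assumes
 * BFI_TX_MAX_VECTORS_PER_WI == 4): an skb with a linear area plus five
 * page fragments has nvecs == 6. Vectors 0..3 live in unmap_q[index]
 * (vector 0 being the dma_unmap_single()'d linear area), vectors 4..5
 * spill into unmap_q[index + 1], and the function returns index + 2,
 * the first entry past the frame, which the caller adopts as the new
 * consumer index.
 */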

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;
		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);

		dev_kfree_skb_any(skb);
	}
}

/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 * b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
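
/*
 * Note on the accounting above (illustrative numbers): the adapter
 * reports progress in work items (WIs), not frames. With four vectors
 * per WI, a frame with nvecs == 6 occupies BNA_TXQ_WI_NEEDED(6) == 2
 * WIs, so the loop debits two WIs from 'wis' for that single skb
 * before bnad_tx_buff_unmap() moves 'cons' past both unmap entries.
 */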

static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

	return IRQ_HANDLED;
}

static inline void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}

/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	order = get_order(rcb->rxq->buffer_size);

	unmap_q->type = BNAD_RXBUF_PAGE;

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

	BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));

	return 0;
}
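
/*
 * Sizing example for the large, non-multi-buffer RxQ case above
 * (illustrative; assumes 4 KiB pages): with a standard 1500-byte MTU,
 * buffer_size <= 2048, so order == 0 and map_size == 2048 - two
 * receive buffers are carved out of each page by the reuse_pi path in
 * bnad_rxq_refill_page(). With a 9000-byte jumbo buffer, order ==
 * get_order(9000) == 2 and map_size == PAGE_SIZE << 2 == 16384, one
 * buffer per compound page. The BUG_ON holds in both cases because
 * map_size evenly divides the allocation size.
 */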

static inline void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
		       dma_unmap_addr(&unmap->vector, dma_addr),
		       unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static inline void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}

static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;
		page_offset += unmap_q->map_size;

		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

static inline void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	u32 to_alloc;

	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;

	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
}

#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
			       BNA_CQ_EF_IPV6 | \
			       BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
			       BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
		    BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
		    BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
		    BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
		    BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
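
/*
 * How the masks above are used (sketch): bnad_cq_process() computes
 * masked_flags = flags & flags_cksum_prot_mask and compares for exact
 * equality. A completion for a TCP/IPv4 frame with both L3 and L4
 * checksums verified matches flags_tcp4; a missing _CKSUM_OK bit or an
 * unexpected protocol bit breaks the equality and the skb is left at
 * CHECKSUM_NONE, so the stack re-verifies the checksum in software.
 */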

static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
		    u32 sop_ci, u32 nvecs)
{
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;
	u32 ci, vec;

	unmap_q = rcb->unmap_q;
	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
}

static void
bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
			u32 sop_ci, u32 nvecs, u32 last_fraglen)
{
	struct bnad *bnad;
	u32 ci, vec, len, totlen = 0;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;

	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[sop_ci].page) +
		 unmap_q->unmap[sop_ci].page_offset);

	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = (vec == nvecs) ?
			last_fraglen : unmap->vector.len;
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		unmap->page = NULL;
		unmap->vector.len = 0;
	}

	skb->len += totlen;
	skb->data_len += totlen;
}

static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	unmap->skb = NULL;
	unmap->vector.len = 0;
}

static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			if (!next_cmpl->valid)
				break;
		}

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
				      BNA_CQ_EF_FCS_ERROR |
				      BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);

		packets++;
		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
		cmpl = &cq[ccb->producer_index];
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}
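
/*
 * A note on the two ring indices used above (descriptive):
 * ccb->producer_index is where the driver consumes the CQ - the name
 * reflects the adapter's view, since hardware produces completions
 * there - while rcb->consumer_index tracks RxQ buffers retired per
 * frame (advanced by nvecs). Valid bits are cleared as CQ entries are
 * recycled, so the adapter and bnad_cq_cleanup() always see a
 * consistent ring.
 */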

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}
/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
			       bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
			       bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tcb->priv = tcb;
	tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
	tcb->priv = NULL;
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
		       bnad->netdev->name, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
			       bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for the first ioceth enable failure, when we
	 * get a zero MAC address. We try to get the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work);
	struct bnad *bnad = NULL;
	struct bna_tcb *tcb;
	unsigned long flags;
	u32 i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_txq_cleanup(bnad, tcb);

		smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
			msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
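
/*
 * Design note on the deferral above (descriptive): a NAPI poll may
 * still hold BNAD_TXQ_FREE_SENT for a TxQ, so any queue found busy is
 * counted in 'pending' and the work requeues itself after one
 * millisecond; bna_tx_cleanup_complete() is signalled only once every
 * TxQ has been drained.
 */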

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(void *work)
{
	struct bnad_rx_info *rx_info =
		container_of(work, struct bnad_rx_info, rx_cleanup_work);
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad *bnad = NULL;
	unsigned long flags;
	u32 i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];

		if (!rx_ctrl->ccb)
			continue;

		bnad = rx_ctrl->ccb->bnad;

		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}

	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		napi_enable(&rx_ctrl->napi);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;

			bnad_rxq_alloc_init(bnad, rcb);
			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			bnad_rxq_post(bnad, rcb);
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
	    !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
	struct bnad_iocmd_comp *iocmd_comp =
			(struct bnad_iocmd_comp *)arg;

	iocmd_comp->comp_status = (u32) status;
	complete(&iocmd_comp->comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates the IRQ for the Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
1416static int
Rasesh Mody078086f2011-08-08 16:21:39 +00001417bnad_mbox_irq_alloc(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001418{
Rasesh Mody0120b992011-07-22 08:07:41 +00001419 int err = 0;
1420 unsigned long irq_flags, flags;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001421 u32 irq;
Rasesh Mody0120b992011-07-22 08:07:41 +00001422 irq_handler_t irq_handler;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001423
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001424 spin_lock_irqsave(&bnad->bna_lock, flags);
1425 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1426 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
Rasesh Mody8811e262011-07-22 08:07:44 +00001427 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
Shyam Iyer82791712011-07-14 15:00:32 +00001428 irq_flags = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001429 } else {
1430 irq_handler = (irq_handler_t)bnad_isr;
1431 irq = bnad->pcidev->irq;
Shyam Iyer5f778982011-06-28 08:58:05 +00001432 irq_flags = IRQF_SHARED;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001433 }
Rasesh Mody8811e262011-07-22 08:07:44 +00001434
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001435 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001436 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1437
Rasesh Modye2fa6f22010-10-05 15:46:04 +00001438 /*
1439 * Set the Mbox IRQ disable flag, so that the IRQ handler
1440 * called from request_irq() for SHARED IRQs do not execute
1441 */
1442 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1443
Rasesh Modybe7fa322010-12-23 21:45:01 +00001444 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1445
Shyam Iyer82791712011-07-14 15:00:32 +00001446 err = request_irq(irq, irq_handler, irq_flags,
Rasesh Modybe7fa322010-12-23 21:45:01 +00001447 bnad->mbox_irq_name, bnad);
Rasesh Modye2fa6f22010-10-05 15:46:04 +00001448
Rasesh Modybe7fa322010-12-23 21:45:01 +00001449 return err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001450}
1451
1452static void
1453bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1454{
1455 kfree(intr_info->idl);
1456 intr_info->idl = NULL;
1457}
1458
1459/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1460static int
1461bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
Rasesh Mody078086f2011-08-08 16:21:39 +00001462 u32 txrx_id, struct bna_intr_info *intr_info)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001463{
1464 int i, vector_start = 0;
1465 u32 cfg_flags;
1466 unsigned long flags;
1467
1468 spin_lock_irqsave(&bnad->bna_lock, flags);
1469 cfg_flags = bnad->cfg_flags;
1470 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1471
1472 if (cfg_flags & BNAD_CF_MSIX) {
1473 intr_info->intr_type = BNA_INTR_T_MSIX;
1474 intr_info->idl = kcalloc(intr_info->num,
1475 sizeof(struct bna_intr_descr),
1476 GFP_KERNEL);
1477 if (!intr_info->idl)
1478 return -ENOMEM;
1479
1480 switch (src) {
1481 case BNAD_INTR_TX:
Rasesh Mody8811e262011-07-22 08:07:44 +00001482 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001483 break;
1484
1485 case BNAD_INTR_RX:
Rasesh Mody8811e262011-07-22 08:07:44 +00001486 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1487 (bnad->num_tx * bnad->num_txq_per_tx) +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001488 txrx_id;
1489 break;
1490
1491 default:
1492 BUG();
1493 }
1494
1495 for (i = 0; i < intr_info->num; i++)
1496 intr_info->idl[i].vector = vector_start + i;
1497 } else {
1498 intr_info->intr_type = BNA_INTR_T_INTX;
1499 intr_info->num = 1;
1500 intr_info->idl = kcalloc(intr_info->num,
1501 sizeof(struct bna_intr_descr),
1502 GFP_KERNEL);
1503 if (!intr_info->idl)
1504 return -ENOMEM;
1505
1506 switch (src) {
1507 case BNAD_INTR_TX:
Rasesh Mody8811e262011-07-22 08:07:44 +00001508 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001509 break;
1510
1511 case BNAD_INTR_RX:
Rasesh Mody8811e262011-07-22 08:07:44 +00001512 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001513 break;
1514 }
1515 }
1516 return 0;
1517}
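/*
 * Editor's note, a worked example of the vector layout computed above
 * (illustrative numbers, assuming BNAD_MAILBOX_MSIX_VECTORS == 1): with
 * num_tx * num_txq_per_tx == 1 and num_rxp_per_rx == 4, the MSI-X table
 * is laid out as
 *
 *	entry 0		mailbox
 *	entry 1		TxQ 0		(vector_start for BNAD_INTR_TX)
 *	entries 2..5	CQ 0..CQ 3	(vector_start for BNAD_INTR_RX)
 *
 * i.e. the Rx vectors always follow all Tx vectors, which is exactly the
 * arithmetic in the BNAD_INTR_RX case.
 */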
1518
Ben Hutchings1aa8b472012-07-10 10:56:59 +00001519/* NOTE: Should be called for MSIX only
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001520 * Unregisters Tx MSIX vector(s) from the kernel
1521 */
1522static void
1523bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1524 int num_txqs)
1525{
1526 int i;
1527 int vector_num;
1528
1529 for (i = 0; i < num_txqs; i++) {
1530 if (tx_info->tcb[i] == NULL)
1531 continue;
1532
1533 vector_num = tx_info->tcb[i]->intr_vector;
1534 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1535 }
1536}
1537
Ben Hutchings1aa8b472012-07-10 10:56:59 +00001538/* NOTE: Should be called for MSIX only
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001539 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1540 */
1541static int
1542bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
Rasesh Mody078086f2011-08-08 16:21:39 +00001543 u32 tx_id, int num_txqs)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001544{
1545 int i;
1546 int err;
1547 int vector_num;
1548
1549 for (i = 0; i < num_txqs; i++) {
1550 vector_num = tx_info->tcb[i]->intr_vector;
1551 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1552 tx_id + tx_info->tcb[i]->id);
1553 err = request_irq(bnad->msix_table[vector_num].vector,
1554 (irq_handler_t)bnad_msix_tx, 0,
1555 tx_info->tcb[i]->name,
1556 tx_info->tcb[i]);
1557 if (err)
1558 goto err_return;
1559 }
1560
1561 return 0;
1562
1563err_return:
1564	if (i > 0)	/* vectors 0..i-1 were registered; free them all */
1565		bnad_tx_msix_unregister(bnad, tx_info, i);
1566 return -1;
1567}
1568
Ben Hutchings1aa8b472012-07-10 10:56:59 +00001569/* NOTE: Should be called for MSIX only
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001570 * Unregisters Rx MSIX vector(s) from the kernel
1571 */
1572static void
1573bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1574 int num_rxps)
1575{
1576 int i;
1577 int vector_num;
1578
1579 for (i = 0; i < num_rxps; i++) {
1580 if (rx_info->rx_ctrl[i].ccb == NULL)
1581 continue;
1582
1583 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1584 free_irq(bnad->msix_table[vector_num].vector,
1585 rx_info->rx_ctrl[i].ccb);
1586 }
1587}
1588
Ben Hutchings1aa8b472012-07-10 10:56:59 +00001589/* NOTE: Should be called for MSIX only
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001590 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1591 */
1592static int
1593bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
Rasesh Mody078086f2011-08-08 16:21:39 +00001594 u32 rx_id, int num_rxps)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001595{
1596 int i;
1597 int err;
1598 int vector_num;
1599
1600 for (i = 0; i < num_rxps; i++) {
1601 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1602 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1603 bnad->netdev->name,
1604 rx_id + rx_info->rx_ctrl[i].ccb->id);
1605 err = request_irq(bnad->msix_table[vector_num].vector,
1606 (irq_handler_t)bnad_msix_rx, 0,
1607 rx_info->rx_ctrl[i].ccb->name,
1608 rx_info->rx_ctrl[i].ccb);
1609 if (err)
1610 goto err_return;
1611 }
1612
1613 return 0;
1614
1615err_return:
1616	if (i > 0)	/* vectors 0..i-1 were registered; free them all */
1617		bnad_rx_msix_unregister(bnad, rx_info, i);
1618 return -1;
1619}
1620
1621/* Free Tx object Resources */
1622static void
1623bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1624{
1625 int i;
1626
1627 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1628 if (res_info[i].res_type == BNA_RES_T_MEM)
1629 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1630 else if (res_info[i].res_type == BNA_RES_T_INTR)
1631 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1632 }
1633}
1634
1635/* Allocates memory and interrupt resources for Tx object */
1636static int
1637bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
Rasesh Mody078086f2011-08-08 16:21:39 +00001638 u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001639{
1640 int i, err = 0;
1641
1642 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1643 if (res_info[i].res_type == BNA_RES_T_MEM)
1644 err = bnad_mem_alloc(bnad,
1645 &res_info[i].res_u.mem_info);
1646 else if (res_info[i].res_type == BNA_RES_T_INTR)
1647 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1648 &res_info[i].res_u.intr_info);
1649 if (err)
1650 goto err_return;
1651 }
1652 return 0;
1653
1654err_return:
1655 bnad_tx_res_free(bnad, res_info);
1656 return err;
1657}
1658
1659/* Free Rx object Resources */
1660static void
1661bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1662{
1663 int i;
1664
1665 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1666 if (res_info[i].res_type == BNA_RES_T_MEM)
1667 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1668 else if (res_info[i].res_type == BNA_RES_T_INTR)
1669 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1670 }
1671}
1672
1673/* Allocates memory and interrupt resources for Rx object */
1674static int
1675bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1676 uint rx_id)
1677{
1678 int i, err = 0;
1679
1680 /* All memory needs to be allocated before setup_ccbs */
1681 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1682 if (res_info[i].res_type == BNA_RES_T_MEM)
1683 err = bnad_mem_alloc(bnad,
1684 &res_info[i].res_u.mem_info);
1685 else if (res_info[i].res_type == BNA_RES_T_INTR)
1686 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1687 &res_info[i].res_u.intr_info);
1688 if (err)
1689 goto err_return;
1690 }
1691 return 0;
1692
1693err_return:
1694 bnad_rx_res_free(bnad, res_info);
1695 return err;
1696}
1697
1698/* Timer callbacks */
1699/* a) IOC timer */
1700static void
1701bnad_ioc_timeout(unsigned long data)
1702{
1703 struct bnad *bnad = (struct bnad *)data;
1704 unsigned long flags;
1705
1706 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00001707 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001708 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1709}
1710
1711static void
1712bnad_ioc_hb_check(unsigned long data)
1713{
1714 struct bnad *bnad = (struct bnad *)data;
1715 unsigned long flags;
1716
1717 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00001718 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1720}
1721
1722static void
Rasesh Mody1d32f762010-12-23 21:45:09 +00001723bnad_iocpf_timeout(unsigned long data)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001724{
1725 struct bnad *bnad = (struct bnad *)data;
1726 unsigned long flags;
1727
1728 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00001729 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
Rasesh Mody1d32f762010-12-23 21:45:09 +00001730 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1731}
1732
1733static void
1734bnad_iocpf_sem_timeout(unsigned long data)
1735{
1736 struct bnad *bnad = (struct bnad *)data;
1737 unsigned long flags;
1738
1739 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00001740 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001741 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1742}
1743
1744/*
1745 * All timer routines use bnad->bna_lock to protect against
1746 * the following race, which may occur without any locking:
Rasesh Mody0120b992011-07-22 08:07:41 +00001747 *	Time	CPU m		CPU n
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001748 *	0	1 = test_bit
1749 *	1			clear_bit
1750 *	2			del_timer_sync
1751 *	3	mod_timer
1752 */
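/*
 * Editor's sketch of the resulting safe pattern (mirrors
 * bnad_dim_timeout() below and the dim_timer teardown in
 * bnad_destroy_rx()): the RUNNING bit is tested/cleared and the timer
 * re-armed only under bna_lock, while del_timer_sync() runs only after
 * the lock is dropped, because the timer callback itself takes bna_lock:
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
 *		mod_timer(&bnad->dim_timer, jiffies + ...);	// re-arm
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	to_del = test_and_clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
 *				    &bnad->run_flags);		// stop
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	if (to_del)
 *		del_timer_sync(&bnad->dim_timer);
 */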
1753
1754/* b) Dynamic Interrupt Moderation Timer */
1755static void
1756bnad_dim_timeout(unsigned long data)
1757{
1758 struct bnad *bnad = (struct bnad *)data;
1759 struct bnad_rx_info *rx_info;
1760 struct bnad_rx_ctrl *rx_ctrl;
1761 int i, j;
1762 unsigned long flags;
1763
1764 if (!netif_carrier_ok(bnad->netdev))
1765 return;
1766
1767 spin_lock_irqsave(&bnad->bna_lock, flags);
1768 for (i = 0; i < bnad->num_rx; i++) {
1769 rx_info = &bnad->rx_info[i];
1770 if (!rx_info->rx)
1771 continue;
1772 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1773 rx_ctrl = &rx_info->rx_ctrl[j];
1774 if (!rx_ctrl->ccb)
1775 continue;
1776 bna_rx_dim_update(rx_ctrl->ccb);
1777 }
1778 }
1779
1780	/* Check for BNAD_CF_DIM_ENABLED; this does not eliminate the race */
1781 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1782 mod_timer(&bnad->dim_timer,
1783 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1784 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1785}
1786
1787/* c) Statistics Timer */
1788static void
1789bnad_stats_timeout(unsigned long data)
1790{
1791 struct bnad *bnad = (struct bnad *)data;
1792 unsigned long flags;
1793
1794 if (!netif_running(bnad->netdev) ||
1795 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1796 return;
1797
1798 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00001799 bna_hw_stats_get(&bnad->bna);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001800 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1801}
1802
1803/*
1804 * Set up timer for DIM
1805 * Called with bnad->bna_lock held
1806 */
1807void
1808bnad_dim_timer_start(struct bnad *bnad)
1809{
1810 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1811 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1812 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1813 (unsigned long)bnad);
1814 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1815 mod_timer(&bnad->dim_timer,
1816 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1817 }
1818}
1819
1820/*
1821 * Set up timer for statistics
1822 * Called with mutex_lock(&bnad->conf_mutex) held
1823 */
1824static void
1825bnad_stats_timer_start(struct bnad *bnad)
1826{
1827 unsigned long flags;
1828
1829 spin_lock_irqsave(&bnad->bna_lock, flags);
1830 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1831 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1832 (unsigned long)bnad);
1833 mod_timer(&bnad->stats_timer,
1834 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1835 }
1836 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001837}
1838
1839/*
1840 * Stops the stats timer
1841 * Called with mutex_lock(&bnad->conf_mutex) held
1842 */
1843static void
1844bnad_stats_timer_stop(struct bnad *bnad)
1845{
1846 int to_del = 0;
1847 unsigned long flags;
1848
1849 spin_lock_irqsave(&bnad->bna_lock, flags);
1850 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1851 to_del = 1;
1852 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1853 if (to_del)
1854 del_timer_sync(&bnad->stats_timer);
1855}
1856
1857/* Utilities */
1858
1859static void
1860bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1861{
1862 int i = 1; /* Index 0 has broadcast address */
1863 struct netdev_hw_addr *mc_addr;
1864
1865 netdev_for_each_mc_addr(mc_addr, netdev) {
Ivan Vecerae2f9ecf2015-06-11 15:52:13 +02001866 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001867 i++;
1868 }
1869}
1870
1871static int
1872bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1873{
1874 struct bnad_rx_ctrl *rx_ctrl =
1875 container_of(napi, struct bnad_rx_ctrl, napi);
Rasesh Mody2be67142011-08-30 15:27:39 +00001876 struct bnad *bnad = rx_ctrl->bnad;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001877 int rcvd = 0;
1878
Rasesh Mody271e8b72011-08-30 15:27:40 +00001879 rx_ctrl->rx_poll_ctr++;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001880
1881 if (!netif_carrier_ok(bnad->netdev))
1882 goto poll_exit;
1883
Jing Huangb3cc6e82012-04-04 05:44:14 +00001884 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001885 if (rcvd >= budget)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001886 return rcvd;
1887
1888poll_exit:
Rasesh Mody19dbff92011-08-30 15:27:41 +00001889 napi_complete(napi);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001890
Rasesh Mody271e8b72011-08-30 15:27:40 +00001891 rx_ctrl->rx_complete++;
Rasesh Mody2be67142011-08-30 15:27:39 +00001892
1893 if (rx_ctrl->ccb)
Rasesh Mody271e8b72011-08-30 15:27:40 +00001894 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1895
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001896 return rcvd;
1897}
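/*
 * Editor's note on the NAPI contract above: returning rcvd == budget
 * without calling napi_complete() keeps the poll scheduled, so the CQ
 * interrupt stays masked and processing continues from softirq context;
 * only the rcvd < budget path completes NAPI and re-enables the Rx
 * interrupt via bnad_enable_rx_irq_unsafe().
 */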
1898
Rasesh Mody2be67142011-08-30 15:27:39 +00001899#define BNAD_NAPI_POLL_QUOTA 64
1900static void
Jing Huang01b54b12012-04-04 05:43:18 +00001901bnad_napi_add(struct bnad *bnad, u32 rx_id)
Rasesh Mody2be67142011-08-30 15:27:39 +00001902{
1903 struct bnad_rx_ctrl *rx_ctrl;
1904 int i;
1905
1906 /* Initialize & enable NAPI */
1907 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1908 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1909 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1910 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1911 }
1912}
1913
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001914static void
Jing Huang01b54b12012-04-04 05:43:18 +00001915bnad_napi_delete(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001916{
1917 int i;
1918
1919 /* First disable and then clean up */
Jing Huang01b54b12012-04-04 05:43:18 +00001920 for (i = 0; i < bnad->num_rxp_per_rx; i++)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001921 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001922}
1923
1924/* Should be called with conf_lock held */
1925void
Jing Huangb3cc6e82012-04-04 05:44:14 +00001926bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001927{
1928 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1929 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1930 unsigned long flags;
1931
1932 if (!tx_info->tx)
1933 return;
1934
1935 init_completion(&bnad->bnad_completions.tx_comp);
1936 spin_lock_irqsave(&bnad->bna_lock, flags);
1937 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1938 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1939 wait_for_completion(&bnad->bnad_completions.tx_comp);
1940
1941 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1942 bnad_tx_msix_unregister(bnad, tx_info,
1943 bnad->num_txq_per_tx);
1944
1945 spin_lock_irqsave(&bnad->bna_lock, flags);
1946 bna_tx_destroy(tx_info->tx);
1947 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1948
1949 tx_info->tx = NULL;
Rasesh Mody078086f2011-08-08 16:21:39 +00001950 tx_info->tx_id = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001951
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001952 bnad_tx_res_free(bnad, res_info);
1953}
1954
1955/* Should be called with conf_lock held */
1956int
Rasesh Mody078086f2011-08-08 16:21:39 +00001957bnad_setup_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001958{
1959 int err;
1960 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1961 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1962 struct bna_intr_info *intr_info =
1963 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1964 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
stephen hemmingerd91d25d2011-09-16 11:09:51 +00001965 static const struct bna_tx_event_cbfn tx_cbfn = {
1966 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1967 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1968 .tx_stall_cbfn = bnad_cb_tx_stall,
1969 .tx_resume_cbfn = bnad_cb_tx_resume,
1970 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1971 };
1972
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001973 struct bna_tx *tx;
1974 unsigned long flags;
1975
Rasesh Mody078086f2011-08-08 16:21:39 +00001976 tx_info->tx_id = tx_id;
1977
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001978 /* Initialize the Tx object configuration */
1979 tx_config->num_txq = bnad->num_txq_per_tx;
1980 tx_config->txq_depth = bnad->txq_depth;
1981 tx_config->tx_type = BNA_TX_T_REGULAR;
Rasesh Mody078086f2011-08-08 16:21:39 +00001982 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001983
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001984 /* Get BNA's resource requirement for one tx object */
1985 spin_lock_irqsave(&bnad->bna_lock, flags);
1986 bna_tx_res_req(bnad->num_txq_per_tx,
1987 bnad->txq_depth, res_info);
1988 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1989
1990 /* Fill Unmap Q memory requirements */
Rasesh Mody52165622012-12-11 12:24:51 +00001991 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1992 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1993 bnad->txq_depth));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001994
1995 /* Allocate resources */
1996 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1997 if (err)
1998 return err;
1999
2000 /* Ask BNA to create one Tx object, supplying required resources */
2001 spin_lock_irqsave(&bnad->bna_lock, flags);
2002 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2003 tx_info);
2004 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Modyf29eeb72013-12-17 17:07:39 -08002005 if (!tx) {
2006 err = -ENOMEM;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002007 goto err_return;
Rasesh Modyf29eeb72013-12-17 17:07:39 -08002008 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002009 tx_info->tx = tx;
2010
Jing Huang01b54b12012-04-04 05:43:18 +00002011 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2012 (work_func_t)bnad_tx_cleanup);
2013
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002014 /* Register ISR for the Tx object */
2015 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2016 err = bnad_tx_msix_register(bnad, tx_info,
2017 tx_id, bnad->num_txq_per_tx);
2018 if (err)
Rasesh Modyf29eeb72013-12-17 17:07:39 -08002019 goto cleanup_tx;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002020 }
2021
2022 spin_lock_irqsave(&bnad->bna_lock, flags);
2023 bna_tx_enable(tx);
2024 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2025
2026 return 0;
2027
Rasesh Modyf29eeb72013-12-17 17:07:39 -08002028cleanup_tx:
2029 spin_lock_irqsave(&bnad->bna_lock, flags);
2030 bna_tx_destroy(tx_info->tx);
2031 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2032 tx_info->tx = NULL;
2033 tx_info->tx_id = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002034err_return:
2035 bnad_tx_res_free(bnad, res_info);
2036 return err;
2037}
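/*
 * Editor's worked example for the BNAD_FILL_UNMAPQ_MEM_REQ() call in
 * bnad_setup_tx() above (illustrative numbers): with num_txq_per_tx == 1
 * and txq_depth == 2048, BNA is asked for one kernel-VA chunk of
 * 2048 * sizeof(struct bnad_tx_unmap) bytes - one unmap slot per TxQ
 * entry, used at completion time to undo the skb DMA mappings.
 */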
2038
2039/* Setup the rx config for bna_rx_create */
2040/* bnad decides the configuration */
2041static void
2042bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2043{
Rasesh Modye29aa332013-12-17 17:07:35 -08002044 memset(rx_config, 0, sizeof(*rx_config));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002045 rx_config->rx_type = BNA_RX_T_REGULAR;
2046 rx_config->num_paths = bnad->num_rxp_per_rx;
Rasesh Mody078086f2011-08-08 16:21:39 +00002047 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002048
2049 if (bnad->num_rxp_per_rx > 1) {
2050 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2051 rx_config->rss_config.hash_type =
Rasesh Mody078086f2011-08-08 16:21:39 +00002052 (BFI_ENET_RSS_IPV6 |
2053 BFI_ENET_RSS_IPV6_TCP |
2054 BFI_ENET_RSS_IPV4 |
2055 BFI_ENET_RSS_IPV4_TCP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002056 rx_config->rss_config.hash_mask =
2057 bnad->num_rxp_per_rx - 1;
Eric Dumazet0fa6aa42014-11-16 06:23:09 -08002058 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002059 sizeof(rx_config->rss_config.toeplitz_hash_key));
2060 } else {
2061 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2062 memset(&rx_config->rss_config, 0,
2063 sizeof(rx_config->rss_config));
2064 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002065
Rasesh Modye29aa332013-12-17 17:07:35 -08002066 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2067 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2068
2069 /* BNA_RXP_SINGLE - one data-buffer queue
2070	 * BNA_RXP_SLR - one small-buffer queue and one large-buffer queue
2071	 * BNA_RXP_HDS - one header-buffer queue and one data-buffer queue
2072 */
2073 /* TODO: configurable param for queue type */
2074 rx_config->rxp_type = BNA_RXP_SLR;
2075
2076 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2077 rx_config->frame_size > 4096) {
2078		/* even though size_routing_enable is set in SLR mode,
2079		 * small packets may still get routed to the same rxq,
2080		 * so set buf_size to 2048 instead of PAGE_SIZE.
2081 */
2082 rx_config->q0_buf_size = 2048;
2083		/* this should be a multiple of 2 */
2084 rx_config->q0_num_vecs = 4;
2085 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2086 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2087 } else {
2088 rx_config->q0_buf_size = rx_config->frame_size;
2089 rx_config->q0_num_vecs = 1;
2090 rx_config->q0_depth = bnad->rxq_depth;
2091 }
2092
2093 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2094 if (rx_config->rxp_type == BNA_RXP_SLR) {
2095 rx_config->q1_depth = bnad->rxq_depth;
2096 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2097 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002098
Ivan Vecera877767d2014-02-28 14:14:03 +01002099 rx_config->vlan_strip_status =
2100 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2101 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002102}
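/*
 * Editor's worked example for the multi-buffer branch above (assuming
 * BNAD_FRAME_SIZE() adds the Ethernet header/FCS overhead to the MTU):
 * a CAT2 adapter with mtu == 9000 gives frame_size > 4096, so q0 becomes
 * q0_buf_size = 2048, q0_num_vecs = 4, q0_depth = rxq_depth * 4 - each
 * jumbo frame is scattered across up to four 2KB buffers, and the queue
 * is deepened so the same number of frames stays postable.
 */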
2103
Rasesh Mody2be67142011-08-30 15:27:39 +00002104static void
2105bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2106{
2107 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2108 int i;
2109
2110 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2111 rx_info->rx_ctrl[i].bnad = bnad;
2112}
2113
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002114/* Called with mutex_lock(&bnad->conf_mutex) held */
stephen hemminger2fd888a2014-01-15 08:24:21 -08002115static u32
Rasesh Modye29aa332013-12-17 17:07:35 -08002116bnad_reinit_rx(struct bnad *bnad)
2117{
2118 struct net_device *netdev = bnad->netdev;
2119 u32 err = 0, current_err = 0;
2120 u32 rx_id = 0, count = 0;
2121 unsigned long flags;
2122
2123 /* destroy and create new rx objects */
2124 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2125 if (!bnad->rx_info[rx_id].rx)
2126 continue;
2127 bnad_destroy_rx(bnad, rx_id);
2128 }
2129
2130 spin_lock_irqsave(&bnad->bna_lock, flags);
2131 bna_enet_mtu_set(&bnad->bna.enet,
2132 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2133 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2134
2135 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2136 count++;
2137 current_err = bnad_setup_rx(bnad, rx_id);
2138 if (current_err && !err) {
2139 err = current_err;
2140 pr_err("RXQ:%u setup failed\n", rx_id);
2141 }
2142 }
2143
2144 /* restore rx configuration */
2145 if (bnad->rx_info[0].rx && !err) {
2146 bnad_restore_vlans(bnad, 0);
2147 bnad_enable_default_bcast(bnad);
2148 spin_lock_irqsave(&bnad->bna_lock, flags);
2149 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2150 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2151 bnad_set_rx_mode(netdev);
2152 }
2153
2154 return count;
2155}
2156
2157/* Called with bnad_conf_lock() held */
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002158void
Jing Huangb3cc6e82012-04-04 05:44:14 +00002159bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002160{
2161 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2162 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2163 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2164 unsigned long flags;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002165 int to_del = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002166
2167 if (!rx_info->rx)
2168 return;
2169
2170 if (0 == rx_id) {
2171 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002172 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2173 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002174 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002175 to_del = 1;
2176 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002177 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002178 if (to_del)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002179 del_timer_sync(&bnad->dim_timer);
2180 }
2181
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002182 init_completion(&bnad->bnad_completions.rx_comp);
2183 spin_lock_irqsave(&bnad->bna_lock, flags);
2184 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2185 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2186 wait_for_completion(&bnad->bnad_completions.rx_comp);
2187
2188 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2189 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2190
Jing Huang01b54b12012-04-04 05:43:18 +00002191 bnad_napi_delete(bnad, rx_id);
Rasesh Mody2be67142011-08-30 15:27:39 +00002192
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002193 spin_lock_irqsave(&bnad->bna_lock, flags);
2194 bna_rx_destroy(rx_info->rx);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002195
2196 rx_info->rx = NULL;
Rasesh Mody3caa1e952011-08-30 15:27:42 +00002197 rx_info->rx_id = 0;
Rasesh Modyb9fa1fb2011-09-16 15:06:48 +00002198 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002199
2200 bnad_rx_res_free(bnad, res_info);
2201}
2202
2203/* Called with mutex_lock(&bnad->conf_mutex) held */
2204int
Rasesh Mody078086f2011-08-08 16:21:39 +00002205bnad_setup_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002206{
2207 int err;
2208 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2209 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2210 struct bna_intr_info *intr_info =
2211 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2212 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
stephen hemmingerd91d25d2011-09-16 11:09:51 +00002213 static const struct bna_rx_event_cbfn rx_cbfn = {
Rasesh Mody52165622012-12-11 12:24:51 +00002214 .rcb_setup_cbfn = NULL,
Jing Huang01b54b12012-04-04 05:43:18 +00002215 .rcb_destroy_cbfn = NULL,
stephen hemmingerd91d25d2011-09-16 11:09:51 +00002216 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2217 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
Rasesh Mody5bcf6ac2011-09-27 10:39:10 +00002218 .rx_stall_cbfn = bnad_cb_rx_stall,
stephen hemmingerd91d25d2011-09-16 11:09:51 +00002219 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2220 .rx_post_cbfn = bnad_cb_rx_post,
2221 };
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002222 struct bna_rx *rx;
2223 unsigned long flags;
2224
Rasesh Mody078086f2011-08-08 16:21:39 +00002225 rx_info->rx_id = rx_id;
2226
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002227 /* Initialize the Rx object configuration */
2228 bnad_init_rx_config(bnad, rx_config);
2229
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002230 /* Get BNA's resource requirement for one Rx object */
2231 spin_lock_irqsave(&bnad->bna_lock, flags);
2232 bna_rx_res_req(rx_config, res_info);
2233 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2234
2235 /* Fill Unmap Q memory requirements */
Rasesh Modye29aa332013-12-17 17:07:35 -08002236 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2237 rx_config->num_paths,
2238 (rx_config->q0_depth *
2239 sizeof(struct bnad_rx_unmap)) +
2240 sizeof(struct bnad_rx_unmap_q));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002241
Rasesh Modye29aa332013-12-17 17:07:35 -08002242 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2243 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2244 rx_config->num_paths,
2245 (rx_config->q1_depth *
2246 sizeof(struct bnad_rx_unmap) +
2247 sizeof(struct bnad_rx_unmap_q)));
2248 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002249 /* Allocate resource */
2250 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2251 if (err)
2252 return err;
2253
Rasesh Mody2be67142011-08-30 15:27:39 +00002254 bnad_rx_ctrl_init(bnad, rx_id);
2255
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002256 /* Ask BNA to create one Rx object, supplying required resources */
2257 spin_lock_irqsave(&bnad->bna_lock, flags);
2258 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2259 rx_info);
Rasesh Mody3caa1e952011-08-30 15:27:42 +00002260 if (!rx) {
2261 err = -ENOMEM;
Rasesh Modyb9fa1fb2011-09-16 15:06:48 +00002262 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002263 goto err_return;
Rasesh Mody3caa1e952011-08-30 15:27:42 +00002264 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002265 rx_info->rx = rx;
Rasesh Modyb9fa1fb2011-09-16 15:06:48 +00002266 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002267
Jing Huang01b54b12012-04-04 05:43:18 +00002268 INIT_WORK(&rx_info->rx_cleanup_work,
2269 (work_func_t)(bnad_rx_cleanup));
2270
Rasesh Mody2be67142011-08-30 15:27:39 +00002271 /*
2272	 * Init NAPI, so that state is set to NAPI_STATE_SCHED and
2273	 * the IRQ handler cannot schedule NAPI at this point.
2274 */
Jing Huang01b54b12012-04-04 05:43:18 +00002275 bnad_napi_add(bnad, rx_id);
Rasesh Mody2be67142011-08-30 15:27:39 +00002276
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002277 /* Register ISR for the Rx object */
2278 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2279 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2280 rx_config->num_paths);
2281 if (err)
2282 goto err_return;
2283 }
2284
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002285 spin_lock_irqsave(&bnad->bna_lock, flags);
2286 if (0 == rx_id) {
2287 /* Set up Dynamic Interrupt Moderation Vector */
2288 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2289 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2290
2291 /* Enable VLAN filtering only on the default Rx */
2292 bna_rx_vlanfilter_enable(rx);
2293
2294 /* Start the DIM timer */
2295 bnad_dim_timer_start(bnad);
2296 }
2297
2298 bna_rx_enable(rx);
2299 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2300
2301 return 0;
2302
2303err_return:
Jing Huangb3cc6e82012-04-04 05:44:14 +00002304 bnad_destroy_rx(bnad, rx_id);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002305 return err;
2306}
2307
2308/* Called with conf_lock & bnad->bna_lock held */
2309void
2310bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2311{
2312 struct bnad_tx_info *tx_info;
2313
2314 tx_info = &bnad->tx_info[0];
2315 if (!tx_info->tx)
2316 return;
2317
2318 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2319}
2320
2321/* Called with conf_lock & bnad->bna_lock held */
2322void
2323bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2324{
2325 struct bnad_rx_info *rx_info;
Rasesh Mody0120b992011-07-22 08:07:41 +00002326 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002327
2328 for (i = 0; i < bnad->num_rx; i++) {
2329 rx_info = &bnad->rx_info[i];
2330 if (!rx_info->rx)
2331 continue;
2332 bna_rx_coalescing_timeo_set(rx_info->rx,
2333 bnad->rx_coalescing_timeo);
2334 }
2335}
2336
2337/*
2338 * Called with bnad->bna_lock held
2339 */
Rasesh Modya2122d92011-08-30 15:27:43 +00002340int
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002341bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2342{
2343 int ret;
2344
2345 if (!is_valid_ether_addr(mac_addr))
2346 return -EADDRNOTAVAIL;
2347
2348 /* If datapath is down, pretend everything went through */
2349 if (!bnad->rx_info[0].rx)
2350 return 0;
2351
2352 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2353 if (ret != BNA_CB_SUCCESS)
2354 return -EADDRNOTAVAIL;
2355
2356 return 0;
2357}
2358
2359/* Should be called with conf_lock held */
Rasesh Modya2122d92011-08-30 15:27:43 +00002360int
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002361bnad_enable_default_bcast(struct bnad *bnad)
2362{
2363 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2364 int ret;
2365 unsigned long flags;
2366
2367 init_completion(&bnad->bnad_completions.mcast_comp);
2368
2369 spin_lock_irqsave(&bnad->bna_lock, flags);
2370 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2371 bnad_cb_rx_mcast_add);
2372 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2373
2374 if (ret == BNA_CB_SUCCESS)
2375 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2376 else
2377 return -ENODEV;
2378
2379 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2380 return -ENODEV;
2381
2382 return 0;
2383}
2384
Rasesh Mody19dbff92011-08-30 15:27:41 +00002385/* Called with mutex_lock(&bnad->conf_mutex) held */
Rasesh Modya2122d92011-08-30 15:27:43 +00002386void
Rasesh Modyaad75b62010-12-23 21:45:08 +00002387bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2388{
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002389 u16 vid;
Rasesh Modyaad75b62010-12-23 21:45:08 +00002390 unsigned long flags;
2391
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002392 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
Rasesh Modyaad75b62010-12-23 21:45:08 +00002393 spin_lock_irqsave(&bnad->bna_lock, flags);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002394 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
Rasesh Modyaad75b62010-12-23 21:45:08 +00002395 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2396 }
2397}
2398
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002399/* Statistics utilities */
2400void
Eric Dumazet250e0612010-09-02 12:45:02 -07002401bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002402{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002403 int i, j;
2404
2405 for (i = 0; i < bnad->num_rx; i++) {
2406 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2407 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002408 stats->rx_packets += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002409 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002410 stats->rx_bytes += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002411 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2412 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2413 bnad->rx_info[i].rx_ctrl[j].ccb->
2414 rcb[1]->rxq) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002415 stats->rx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002416 bnad->rx_info[i].rx_ctrl[j].
2417 ccb->rcb[1]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002418 stats->rx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002419 bnad->rx_info[i].rx_ctrl[j].
2420 ccb->rcb[1]->rxq->rx_bytes;
2421 }
2422 }
2423 }
2424 }
2425 for (i = 0; i < bnad->num_tx; i++) {
2426 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2427 if (bnad->tx_info[i].tcb[j]) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002428 stats->tx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002429 bnad->tx_info[i].tcb[j]->txq->tx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002430 stats->tx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002431 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2432 }
2433 }
2434 }
2435}
2436
2437/*
2438 * Must be called with the bna_lock held.
2439 */
2440void
Eric Dumazet250e0612010-09-02 12:45:02 -07002441bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002442{
Rasesh Mody078086f2011-08-08 16:21:39 +00002443 struct bfi_enet_stats_mac *mac_stats;
2444 u32 bmap;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002445 int i;
2446
Rasesh Mody078086f2011-08-08 16:21:39 +00002447 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
Eric Dumazet250e0612010-09-02 12:45:02 -07002448 stats->rx_errors =
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002449 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2450 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2451 mac_stats->rx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002452 stats->tx_errors = mac_stats->tx_fcs_error +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002453 mac_stats->tx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002454 stats->rx_dropped = mac_stats->rx_drop;
2455 stats->tx_dropped = mac_stats->tx_drop;
2456 stats->multicast = mac_stats->rx_multicast;
2457 stats->collisions = mac_stats->tx_total_collision;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002458
Eric Dumazet250e0612010-09-02 12:45:02 -07002459 stats->rx_length_errors = mac_stats->rx_frame_length_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002460
2461 /* receive ring buffer overflow ?? */
2462
Eric Dumazet250e0612010-09-02 12:45:02 -07002463 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2464 stats->rx_frame_errors = mac_stats->rx_alignment_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002465	/* receiver FIFO overrun */
Rasesh Mody078086f2011-08-08 16:21:39 +00002466 bmap = bna_rx_rid_mask(&bnad->bna);
2467 for (i = 0; bmap; i++) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002468 if (bmap & 1) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002469 stats->rx_fifo_errors +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002470 bnad->stats.bna_stats->
Rasesh Mody078086f2011-08-08 16:21:39 +00002471 hw_stats.rxf_stats[i].frame_drops;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002472 break;
2473 }
2474 bmap >>= 1;
2475 }
2476}
2477
2478static void
2479bnad_mbox_irq_sync(struct bnad *bnad)
2480{
2481 u32 irq;
2482 unsigned long flags;
2483
2484 spin_lock_irqsave(&bnad->bna_lock, flags);
2485 if (bnad->cfg_flags & BNAD_CF_MSIX)
Rasesh Mody8811e262011-07-22 08:07:44 +00002486 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002487 else
2488 irq = bnad->pcidev->irq;
2489 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2490
2491 synchronize_irq(irq);
2492}
2493
2494/* Utility used by bnad_start_xmit, for doing TSO */
2495static int
2496bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2497{
2498 int err;
2499
françois romieub13a8a92014-03-29 12:26:28 +01002500 err = skb_cow_head(skb, 0);
2501 if (err < 0) {
2502 BNAD_UPDATE_CTR(bnad, tso_err);
2503 return err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002504 }
2505
2506 /*
2507 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2508 * excluding the length field.
2509 */
Vlad Yasevich1c537302014-08-25 10:34:50 -04002510 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002511 struct iphdr *iph = ip_hdr(skb);
2512
2513 /* Do we really need these? */
2514 iph->tot_len = 0;
2515 iph->check = 0;
2516
2517 tcp_hdr(skb)->check =
2518 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2519 IPPROTO_TCP, 0);
2520 BNAD_UPDATE_CTR(bnad, tso4);
2521 } else {
2522 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2523
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002524 ipv6h->payload_len = 0;
2525 tcp_hdr(skb)->check =
2526 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2527 IPPROTO_TCP, 0);
2528 BNAD_UPDATE_CTR(bnad, tso6);
2529 }
2530
2531 return 0;
2532}
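/*
 * Editor's note on the checksum seeding above: for TSO the device
 * rewrites tot_len/payload_len per segment and folds those lengths into
 * the checksum incrementally, so the seed stored in tcp->check must be
 * the pseudo-header sum *without* the length - hence the 0 length
 * argument to csum_tcpudp_magic()/csum_ipv6_magic(), and the ~ which
 * undoes the final inversion those helpers apply.
 */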
2533
2534/*
2535 * Initialize Q numbers depending on Rx Paths
2536 * Called with bnad->bna_lock held, because of cfg_flags
2537 * access.
2538 */
2539static void
2540bnad_q_num_init(struct bnad *bnad)
2541{
2542 int rxps;
2543
2544 rxps = min((uint)num_online_cpus(),
Rasesh Mody772b5232011-08-30 15:27:37 +00002545 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002546
2547 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2548 rxps = 1; /* INTx */
2549
2550 bnad->num_rx = 1;
2551 bnad->num_tx = 1;
2552 bnad->num_rxp_per_rx = rxps;
2553 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2554}
2555
2556/*
2557 * Adjusts the Q numbers, given a number of msix vectors
2558 * Give preference to RSS over Tx priority queues; in that
2559 * case, use just one Tx queue.
2560 * Called with bnad->bna_lock held because of cfg_flags access.
2561 */
2562static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002563bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002564{
2565 bnad->num_txq_per_tx = 1;
2566 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2567 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2568 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2569 bnad->num_rxp_per_rx = msix_vectors -
2570 (bnad->num_tx * bnad->num_txq_per_tx) -
2571 BNAD_MAILBOX_MSIX_VECTORS;
2572 } else
2573 bnad->num_rxp_per_rx = 1;
2574}
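/*
 * Editor's worked example for the adjustment above (illustrative
 * numbers): with msix_vectors == 8, one TxQ and
 * BNAD_MAILBOX_MSIX_VECTORS == 1, the availability test against
 * 1 + bnad_rxqs_per_cq + 1 == 4 passes and num_rxp_per_rx becomes
 * 8 - 1 - 1 == 6; with fewer vectors the driver falls back to a single
 * Rx path.
 */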
2575
Rasesh Mody078086f2011-08-08 16:21:39 +00002576/* Enable / disable ioceth */
2577static int
2578bnad_ioceth_disable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002579{
2580 unsigned long flags;
Rasesh Mody078086f2011-08-08 16:21:39 +00002581 int err = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002582
2583 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002584 init_completion(&bnad->bnad_completions.ioc_comp);
2585 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002586 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2587
Rasesh Mody078086f2011-08-08 16:21:39 +00002588 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2589 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2590
2591 err = bnad->bnad_completions.ioc_comp_status;
2592 return err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002593}
2594
2595static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002596bnad_ioceth_enable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002597{
2598 int err = 0;
2599 unsigned long flags;
2600
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002601 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002602 init_completion(&bnad->bnad_completions.ioc_comp);
2603 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2604 bna_ioceth_enable(&bnad->bna.ioceth);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002605 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2606
Rasesh Mody078086f2011-08-08 16:21:39 +00002607 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2608 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002609
Rasesh Mody078086f2011-08-08 16:21:39 +00002610 err = bnad->bnad_completions.ioc_comp_status;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002611
2612 return err;
2613}
2614
2615/* Free BNA resources */
2616static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002617bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2618 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002619{
2620 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002621
Rasesh Mody078086f2011-08-08 16:21:39 +00002622 for (i = 0; i < res_val_max; i++)
2623 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002624}
2625
2626/* Allocates memory and interrupt resources for BNA */
2627static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002628bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2629 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002630{
2631 int i, err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002632
Rasesh Mody078086f2011-08-08 16:21:39 +00002633 for (i = 0; i < res_val_max; i++) {
2634 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002635 if (err)
2636 goto err_return;
2637 }
2638 return 0;
2639
2640err_return:
Rasesh Mody078086f2011-08-08 16:21:39 +00002641 bnad_res_free(bnad, res_info, res_val_max);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002642 return err;
2643}
2644
2645/* Interrupt enable / disable */
2646static void
2647bnad_enable_msix(struct bnad *bnad)
2648{
2649 int i, ret;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002650 unsigned long flags;
2651
2652 spin_lock_irqsave(&bnad->bna_lock, flags);
2653 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2654 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2655 return;
2656 }
2657 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2658
2659 if (bnad->msix_table)
2660 return;
2661
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002662 bnad->msix_table =
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002663 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002664
2665 if (!bnad->msix_table)
2666 goto intx_mode;
2667
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002668 for (i = 0; i < bnad->msix_num; i++)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002669 bnad->msix_table[i].entry = i;
2670
Alexander Gordeev43c20202014-02-18 11:07:56 +01002671 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2672 1, bnad->msix_num);
2673 if (ret < 0) {
2674 goto intx_mode;
2675 } else if (ret < bnad->msix_num) {
Rasesh Mody19dbff92011-08-30 15:27:41 +00002676 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2677 ret, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002678
2679 spin_lock_irqsave(&bnad->bna_lock, flags);
2680 /* ret = #of vectors that we got */
Rasesh Mody271e8b72011-08-30 15:27:40 +00002681 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2682 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002683 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2684
Rasesh Mody271e8b72011-08-30 15:27:40 +00002685 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002686 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002687
Alexander Gordeev43c20202014-02-18 11:07:56 +01002688 if (bnad->msix_num > ret) {
2689 pci_disable_msix(bnad->pcidev);
Rasesh Mody078086f2011-08-08 16:21:39 +00002690 goto intx_mode;
Alexander Gordeev43c20202014-02-18 11:07:56 +01002691 }
2692 }
Rasesh Mody078086f2011-08-08 16:21:39 +00002693
2694 pci_intx(bnad->pcidev, 0);
2695
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002696 return;
2697
2698intx_mode:
Rasesh Mody19dbff92011-08-30 15:27:41 +00002699 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002700
2701 kfree(bnad->msix_table);
2702 bnad->msix_table = NULL;
2703 bnad->msix_num = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002704 spin_lock_irqsave(&bnad->bna_lock, flags);
2705 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2706 bnad_q_num_init(bnad);
2707 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2708}
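/*
 * Editor's worked example for the partial-grant path above (illustrative
 * numbers, assuming BNAD_MAILBOX_MSIX_VECTORS == 1): if 9 vectors were
 * requested but pci_enable_msix_range() grants ret == 5, the queue
 * counts are re-adjusted with (5 - 1) / 2 == 2 vectors offered to each
 * of Tx and Rx, msix_num is recomputed as BNAD_NUM_TXQ + BNAD_NUM_RXP +
 * 1, and only if that still exceeds the 5 granted vectors does the
 * driver release MSI-X and drop to INTx mode.
 */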
2709
2710static void
2711bnad_disable_msix(struct bnad *bnad)
2712{
2713 u32 cfg_flags;
2714 unsigned long flags;
2715
2716 spin_lock_irqsave(&bnad->bna_lock, flags);
2717 cfg_flags = bnad->cfg_flags;
2718 if (bnad->cfg_flags & BNAD_CF_MSIX)
2719 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2720 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2721
2722 if (cfg_flags & BNAD_CF_MSIX) {
2723 pci_disable_msix(bnad->pcidev);
2724 kfree(bnad->msix_table);
2725 bnad->msix_table = NULL;
2726 }
2727}
2728
2729/* Netdev entry points */
2730static int
2731bnad_open(struct net_device *netdev)
2732{
2733 int err;
2734 struct bnad *bnad = netdev_priv(netdev);
2735 struct bna_pause_config pause_config;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002736 unsigned long flags;
2737
2738 mutex_lock(&bnad->conf_mutex);
2739
2740 /* Tx */
2741 err = bnad_setup_tx(bnad, 0);
2742 if (err)
2743 goto err_return;
2744
2745 /* Rx */
2746 err = bnad_setup_rx(bnad, 0);
2747 if (err)
2748 goto cleanup_tx;
2749
2750 /* Port */
2751 pause_config.tx_pause = 0;
2752 pause_config.rx_pause = 0;
2753
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002754 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Modye29aa332013-12-17 17:07:35 -08002755 bna_enet_mtu_set(&bnad->bna.enet,
2756 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
Rasesh Mody078086f2011-08-08 16:21:39 +00002757 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2758 bna_enet_enable(&bnad->bna.enet);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002759 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2760
2761 /* Enable broadcast */
2762 bnad_enable_default_bcast(bnad);
2763
Rasesh Modyaad75b62010-12-23 21:45:08 +00002764 /* Restore VLANs, if any */
2765 bnad_restore_vlans(bnad, 0);
2766
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002767 /* Set the UCAST address */
2768 spin_lock_irqsave(&bnad->bna_lock, flags);
2769 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2770 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2771
2772 /* Start the stats timer */
2773 bnad_stats_timer_start(bnad);
2774
2775 mutex_unlock(&bnad->conf_mutex);
2776
2777 return 0;
2778
2779cleanup_tx:
Jing Huangb3cc6e82012-04-04 05:44:14 +00002780 bnad_destroy_tx(bnad, 0);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002781
2782err_return:
2783 mutex_unlock(&bnad->conf_mutex);
2784 return err;
2785}
2786
2787static int
2788bnad_stop(struct net_device *netdev)
2789{
2790 struct bnad *bnad = netdev_priv(netdev);
2791 unsigned long flags;
2792
2793 mutex_lock(&bnad->conf_mutex);
2794
2795 /* Stop the stats timer */
2796 bnad_stats_timer_stop(bnad);
2797
Rasesh Mody078086f2011-08-08 16:21:39 +00002798 init_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002799
2800 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002801 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2802 bnad_cb_enet_disabled);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002803 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2804
Rasesh Mody078086f2011-08-08 16:21:39 +00002805 wait_for_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002806
Jing Huangb3cc6e82012-04-04 05:44:14 +00002807 bnad_destroy_tx(bnad, 0);
2808 bnad_destroy_rx(bnad, 0);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002809
2810 /* Synchronize mailbox IRQ */
2811 bnad_mbox_irq_sync(bnad);
2812
2813 mutex_unlock(&bnad->conf_mutex);
2814
2815 return 0;
2816}
2817
2818/* TX */
Rasesh Mody52165622012-12-11 12:24:51 +00002819/* Returns 0 for success */
2820static int
2821bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2822 struct sk_buff *skb, struct bna_txq_entry *txqent)
2823{
2824 u16 flags = 0;
2825 u32 gso_size;
2826 u16 vlan_tag = 0;
2827
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002828 if (skb_vlan_tag_present(skb)) {
2829 vlan_tag = (u16)skb_vlan_tag_get(skb);
Rasesh Mody52165622012-12-11 12:24:51 +00002830 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2831 }
2832 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2833 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2834 | (vlan_tag & 0x1fff);
2835 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2836 }
2837 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2838
2839 if (skb_is_gso(skb)) {
2840 gso_size = skb_shinfo(skb)->gso_size;
2841 if (unlikely(gso_size > bnad->netdev->mtu)) {
2842 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2843 return -EINVAL;
2844 }
2845 if (unlikely((gso_size + skb_transport_offset(skb) +
2846 tcp_hdrlen(skb)) >= skb->len)) {
Joe Perchesb779d0a2014-03-12 10:22:30 -07002847 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
Rasesh Mody52165622012-12-11 12:24:51 +00002848 txqent->hdr.wi.lso_mss = 0;
2849 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2850 } else {
Joe Perchesb779d0a2014-03-12 10:22:30 -07002851 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
Rasesh Mody52165622012-12-11 12:24:51 +00002852 txqent->hdr.wi.lso_mss = htons(gso_size);
2853 }
2854
2855 if (bnad_tso_prepare(bnad, skb)) {
2856 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2857 return -EINVAL;
2858 }
2859
2860 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2861 txqent->hdr.wi.l4_hdr_size_n_offset =
2862 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2863 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2864 } else {
Joe Perchesb779d0a2014-03-12 10:22:30 -07002865 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
Rasesh Mody52165622012-12-11 12:24:51 +00002866 txqent->hdr.wi.lso_mss = 0;
2867
Ivan Vecera6654cf62014-10-06 19:02:37 +02002868 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
Rasesh Mody52165622012-12-11 12:24:51 +00002869 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2870 return -EINVAL;
2871 }
2872
2873 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Vlad Yasevich1c537302014-08-25 10:34:50 -04002874 __be16 net_proto = vlan_get_protocol(skb);
Rasesh Mody52165622012-12-11 12:24:51 +00002875 u8 proto = 0;
2876
Vlad Yasevich1c537302014-08-25 10:34:50 -04002877 if (net_proto == htons(ETH_P_IP))
Rasesh Mody52165622012-12-11 12:24:51 +00002878 proto = ip_hdr(skb)->protocol;
2879#ifdef NETIF_F_IPV6_CSUM
Vlad Yasevich1c537302014-08-25 10:34:50 -04002880 else if (net_proto == htons(ETH_P_IPV6)) {
Rasesh Mody52165622012-12-11 12:24:51 +00002881 /* nexthdr may not be TCP immediately. */
2882 proto = ipv6_hdr(skb)->nexthdr;
2883 }
2884#endif
2885 if (proto == IPPROTO_TCP) {
2886 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2887 txqent->hdr.wi.l4_hdr_size_n_offset =
2888 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2889 (0, skb_transport_offset(skb)));
2890
2891 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2892
2893 if (unlikely(skb_headlen(skb) <
2894 skb_transport_offset(skb) +
2895 tcp_hdrlen(skb))) {
2896 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2897 return -EINVAL;
2898 }
2899 } else if (proto == IPPROTO_UDP) {
2900 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2901 txqent->hdr.wi.l4_hdr_size_n_offset =
2902 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2903 (0, skb_transport_offset(skb)));
2904
2905 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2906 if (unlikely(skb_headlen(skb) <
2907 skb_transport_offset(skb) +
2908 sizeof(struct udphdr))) {
2909 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2910 return -EINVAL;
2911 }
2912 } else {
2913
2914 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2915 return -EINVAL;
2916 }
2917 } else
2918 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2919 }
2920
2921 txqent->hdr.wi.flags = htons(flags);
2922 txqent->hdr.wi.frame_length = htonl(skb->len);
2923
2924 return 0;
2925}
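/*
 * Editor's worked example for the CEE vlan_tag rewrite above: with
 * tcb->priority == 5 and an existing tag carrying VID 100, the result is
 * (5 << VLAN_PRIO_SHIFT) | (100 & 0x1fff) == 0xa064 - the priority bits
 * are forced to the CEE priority while the low 13 bits (CFI/DEI + VID)
 * are preserved.
 */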

/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = NULL;
	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
	u32 prod, q_depth, vect_id;
	u32 wis, vectors, len;
	int i;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;

	len = skb_headlen(skb);

	/* Sanity checks for the skb */

	if (unlikely(skb->len <= ETH_HLEN)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
		return NETDEV_TX_OK;
	}
	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}
	if (unlikely(len == 0)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}
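	/*
	 * Note: both the headlen-too-long and headlen-zero rejects above
	 * bump the same tx_skb_headlen_zero counter; there is apparently
	 * no dedicated "headlen too long" statistic.
	 */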

	tcb = bnad->tx_info[0].tcb[txq_id];

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
		return NETDEV_TX_OK;
	}

	q_depth = tcb->q_depth;
	prod = tcb->producer_index;
	unmap_q = tcb->unmap_q;

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
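	/*
	 * Work-item math, assuming BFI_TX_MAX_VECTORS_PER_WI == 4 (which
	 * the wrap check in the fragment loop below implies): an skb with
	 * a linear area plus 5 frags uses vectors = 6, and therefore
	 * wis = BNA_TXQ_WI_NEEDED(6) = 2 -- one base WI plus one
	 * BNA_TXQ_WI_EXTENSION entry in the ring.
	 */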

	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
		return NETDEV_TX_OK;
	}

	/* Check for available TxQ resources */
	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			u32 sent;
			sent = bnad_txcmpl_process(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, sent);
			smp_mb__before_atomic();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
	head_unmap = &unmap_q[prod];

	/* Program the opcode, flags, frame_len, num_vectors in WI */
	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;

	head_unmap->skb = skb;
	head_unmap->nvecs = 0;

	/* Program the vectors */
	unmap = head_unmap;
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  len, DMA_TO_DEVICE);
	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
	txqent->vector[0].length = htons(len);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
	head_unmap->nvecs++;

	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = skb_frag_size(frag);

		if (unlikely(size == 0)) {
			/* Undo the changes starting at tcb->producer_index */
			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
					   tcb->producer_index);
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
			return NETDEV_TX_OK;
		}

		len += size;

		vect_id++;
		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			BNA_QE_INDX_INC(prod, q_depth);
			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
			unmap = &unmap_q[prod];
		}

		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
					    0, size, DMA_TO_DEVICE);
		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		txqent->vector[vect_id].length = htons(size);
		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
				   dma_addr);
		head_unmap->nvecs++;
	}

	if (unlikely(len != skb->len)) {
		/* Undo the changes starting at tcb->producer_index */
		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
		return NETDEV_TX_OK;
	}

	BNA_QE_INDX_INC(prod, q_depth);
	tcb->producer_index = prod;

	smp_mb();
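	/*
	 * The full barrier above appears to order the producer_index
	 * store and the WI/unmap-queue writes ahead of the
	 * BNAD_TXQ_TX_STARTED re-check and the doorbell below, so the
	 * device never observes the new producer index before the work
	 * items it points at are complete.
	 */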

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	skb_tx_timestamp(skb);

	bna_txq_prod_indx_doorbell(tcb);
	smp_mb();

	return NETDEV_TX_OK;
}

/*
 * The spin lock synchronizes reading of the stats structures, which
 * are written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}

static void
bnad_set_rx_ucast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int uc_count = netdev_uc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;
	struct netdev_hw_addr *ha;
	int entry;

	if (netdev_uc_empty(bnad->netdev)) {
		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
		return;
	}

	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
		goto mode_default;

	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
	if (mac_list == NULL)
		goto mode_default;

	entry = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
		entry++;
	}

	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
				   mac_list, NULL);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_default;

	return;

	/* ucast packets not in UCAM are routed to default function */
mode_default:
	bnad->cfg_flags |= BNAD_CF_DEFAULT;
	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
}

static void
bnad_set_rx_mcast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int mc_count = netdev_mc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;

	if (netdev->flags & IFF_ALLMULTI)
		goto mode_allmulti;

	if (netdev_mc_empty(netdev))
		return;

	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
		goto mode_allmulti;

	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
	if (mac_list == NULL)
		goto mode_allmulti;

	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);

	/* copy rest of the MCAST addresses */
	bnad_netdev_mc_list_get(netdev, mac_list);
	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				   mac_list, NULL);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_allmulti;

	return;

mode_allmulti:
	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
	bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
}
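/*
 * Both filter helpers above run with bnad->bna_lock held (see
 * bnad_set_rx_mode() below), which is why the address lists are
 * allocated with GFP_ATOMIC. On any failure -- too many addresses for
 * the CAM, allocation failure, or a rejected list -- they fall back to
 * a coarser mode (default-function RX or ALLMULTI) rather than drop
 * traffic.
 */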

void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	enum bna_rxmode new_mode, mode_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (bnad->rx_info[0].rx == NULL) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}

	/* clear bnad flags before updating them with the new settings */
	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
			BNAD_CF_ALLMULTI);

	new_mode = 0;
	if (netdev->flags & IFF_PROMISC) {
		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
		bnad->cfg_flags |= BNAD_CF_PROMISC;
	} else {
		bnad_set_rx_mcast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
			new_mode |= BNA_RXMODE_ALLMULTI;

		bnad_set_rx_ucast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
			new_mode |= BNA_RXMODE_DEFAULT;
	}

	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
			BNA_RXMODE_ALLMULTI;
	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * bna_lock is used to sync writes to netdev->addr;
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
	if (!err)
		ether_addr_copy(netdev->dev_addr, sa->sa_data);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}

static int
bnad_mtu_set(struct bnad *bnad, int frame_size)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
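/*
 * bnad_mtu_set() above is the driver's synchronous-command pattern in
 * miniature: issue the request to the BNA state machine under bna_lock,
 * then sleep on a completion that the bnad_cb_enet_mtu_set() callback
 * is expected to fire once the enet module has accepted the new frame
 * size; the completion status becomes the return value.
 */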

static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu;
	struct bnad *bnad = netdev_priv(netdev);
	u32 rx_count = 0, frame, new_frame;

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	mtu = netdev->mtu;
	netdev->mtu = new_mtu;

	frame = BNAD_FRAME_SIZE(mtu);
	new_frame = BNAD_FRAME_SIZE(new_mtu);

	/* check if multi-buffer needs to be enabled */
	if (BNAD_PCI_DEV_IS_CAT2(bnad) && netif_running(bnad->netdev)) {
		/* only when the transition crosses 4K */
		if ((frame <= 4096 && new_frame > 4096) ||
		    (frame > 4096 && new_frame <= 4096))
			rx_count = bnad_reinit_rx(bnad);
	}

	/* rx_count > 0 means the RX path was re-created for the new
	 * frame size; err stays 0 unless programming the MTU fails.
	 */
	err = bnad_mtu_set(bnad, new_frame);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
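/*
 * Example (assuming BNAD_FRAME_SIZE() adds the Ethernet/VLAN overhead
 * to the MTU): on a CAT2 device, going from mtu 3000 to 9000 crosses
 * the 4096-byte frame boundary, so the RX path is torn down and rebuilt
 * in multi-buffer mode before the new MTU is programmed; 8000 -> 9000
 * stays above 4K and reuses the existing queues.
 */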

static int
bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}

static int
bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}

static int bnad_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnad *bnad = netdev_priv(dev);
	netdev_features_t changed = features ^ dev->features;

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave(&bnad->bna_lock, flags);

		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
		else
			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);

		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here
		 */

		/* Rx processing */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb)
					bnad_netif_rx_schedule_poll(bnad,
								    rx_ctrl->ccb);
			}
		}
	}
}
#endif
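/*
 * In the INTx branch above, bna_intx_disable() is evidently a macro
 * that saves the current interrupt mask into curr_mask before masking
 * (curr_mask is passed by value yet acts as an output), and
 * bna_intx_enable() restores that saved mask. The MSI-X branch only
 * schedules NAPI for each CCB, since Tx completions are reaped from
 * the sending path.
 */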

static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
	.ndo_set_features	= bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};

static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}

/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize no. of TxQ & CQs & MSIX vectors
 * 4. Initialize work queue.
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
	if (!bnad->work_q) {
		iounmap(bnad->bar0);
		return -ENOMEM;
	}

	return 0;
}
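/*
 * MSI-X sizing example for the formula above (illustrative numbers;
 * the real ones come from bnad_q_num_init(), and this assumes
 * BNAD_MAILBOX_MSIX_VECTORS == 1): with num_tx = 1, num_txq_per_tx = 8,
 * num_rx = 1 and num_rxp_per_rx = 8, msix_num = 8 + 8 + 1 = 17 -- one
 * vector per TxQ, one per RX path, plus the mailbox.
 */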

/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->work_q) {
		flush_workqueue(bnad->work_q);
		destroy_workqueue(bnad->work_q);
		bnad->work_q = NULL;
	}

	if (bnad->bar0)
		iounmap(bnad->bar0);
}

/*
 * Initialize locks
 *	a) Per ioceth mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
	mutex_init(&bnad_list_mutex);
}
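/*
 * Note: bnad_list_mutex is a driver-global mutex, yet it is
 * (re)initialized here on every probe; that is harmless only as long
 * as two devices are never probed or removed concurrently.
 */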

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
	mutex_destroy(&bnad_list_mutex);
}

/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto release_regions;
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);
	bnad_lock_init(bnad);
	bnad_add_to_list(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	using_dac = false;
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Setup the debugfs node for this bnad */
	if (bna_debugfs_enable)
		bnad_debugfs_init(bnad);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/*
	 * Start the chip
	 * If the callback comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n", err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
	    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
				  bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
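/*
 * The error labels above unwind strictly in reverse order of setup
 * (module resources -> ioceth -> timers/IRQ/MSI-X -> bna resources ->
 * debugfs/workqueue -> PCI -> locks/netdev), so each goto target only
 * undoes what had completed when the failure occurred. Note that the
 * probe_success path, taken when bnad_ioceth_enable() fails, still
 * returns 0, leaving the PCI device bound without a registered netdev.
 */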

static void
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}

static const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = bnad_pci_remove,
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna: PCI registration failed in module init (%d)\n",
		       err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);
	release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);